function_name (string, 1-63 chars) | docstring (string, 50-5.89k chars) | masked_code (string, 50-882k chars) | implementation (string, 169-12.9k chars) | start_line (int32, 1-14.6k) | end_line (int32, 16-14.6k) | file_content (string, 274-882k chars) |
---|---|---|---|---|---|---|
_iter_valid_files
|
Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
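# Example (illustrative): the default axes (row_axis=1, col_axis=2,
# channel_axis=0) assume a single channels_first image. For a hypothetical
# channels_last array `img` of shape (H, W, 3), the axes are passed explicitly:
#     rotated = random_rotation(img, rg=30, row_axis=0, col_axis=1, channel_axis=2)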
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
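# Example (illustrative, requires PIL): a brightness jitter in [0.8, 1.2] on a
# hypothetical channels_last image array `img` of shape (H, W, 3):
#     brighter = random_brightness(img, (0.8, 1.2))
# Note that the input is routed through a PIL image, so the returned array is
# rescaled to the [0, 255] range by `array_to_img`/`img_to_array`.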
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
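# Example (illustrative): `ImageDataGenerator.random_transform` below composes
# several of these 3x3 homographies with `np.dot` before applying them once.
# The same can be done manually for a hypothetical channels_first image `img`
# of shape (3, H, W):
#     theta = np.deg2rad(15)
#     rot = np.array([[np.cos(theta), -np.sin(theta), 0],
#                     [np.sin(theta), np.cos(theta), 0],
#                     [0, 0, 1]])
#     matrix = transform_matrix_offset_center(rot, img.shape[1], img.shape[2])
#     out = apply_transform(img, matrix, channel_axis=0)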
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
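# Example (illustrative) round trip, requires PIL; `x` is a hypothetical
# channels_last float array of shape (H, W, 3):
#     img = array_to_img(x, data_format='channels_last', scale=True)
#     x2 = img_to_array(img, data_format='channels_last')  # (H, W, 3), values in [0, 255]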
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
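# Example (illustrative; the path is hypothetical, requires PIL):
#     img = load_img('data/train/cats/cat.0.jpg', target_size=(150, 150),
#                    interpolation='bilinear')
#     x = img_to_array(img)  # (150, 150, 3) with the 'channels_last' data format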
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
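# Example (illustrative): with `validation_split` set on the generator, `flow`
# can yield disjoint training and validation batches from the same arrays
# (`x_train` and `y_train` are hypothetical rank-4 data and label arrays):
#     datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
#     train_it = datagen.flow(x_train, y_train, batch_size=32, subset='training')
#     val_it = datagen.flow(x_train, y_train, batch_size=32, subset='validation')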
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories will be included
in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that if `class_mode` is None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
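# Example (illustrative): featurewise statistics must be computed with `fit`
# before `standardize` (or `flow`) is used; `x_train` is a hypothetical
# rank-4 array of images:
#     datagen = ImageDataGenerator(featurewise_center=True,
#                                  featurewise_std_normalization=True)
#     datagen.fit(x_train)
#     x_norm = datagen.standardize(np.copy(x_train[0]))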
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
# MASKED: _iter_valid_files function (lines 1356-1381)
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
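# Worked example (illustrative): with 100 valid files and `split=(0, 0.25)`,
# `start, stop = 0, 25`, so 25 files are counted for the validation subset;
# `split=(0.25, 1)` counts the remaining 75 for training.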
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
| 1,356 | 1,381 |
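A minimal sketch of how the implementation above could be exercised; the temporary directory, the file names, and the printed result are illustrative assumptions, and `_iter_valid_files` refers to the function listed in the implementation column:

```python
import os
import tempfile

# Build a tiny throwaway directory with one whitelisted and one ignored file.
tmp = tempfile.mkdtemp()
open(os.path.join(tmp, 'a.jpg'), 'wb').close()
open(os.path.join(tmp, 'notes.txt'), 'wb').close()

white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff'}
found = list(_iter_valid_files(tmp, white_list_formats, follow_links=False))
print(found)  # expected: [(tmp, 'a.jpg')] -- only whitelisted extensions are yielded
```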
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
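    # Example
        A minimal usage sketch (illustrative values), with zoom factors drawn
        from `[0.8, 1.2]` for a channels-last image:
        ```python
        x = np.random.random((224, 224, 3))
        x_zoomed = random_zoom(x, (0.8, 1.2),
                               row_axis=0, col_axis=1, channel_axis=2)
        ```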
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
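    # Example
        A minimal usage sketch (illustrative values); this requires PIL,
        since the image is passed through `ImageEnhance.Brightness`:
        ```python
        x = np.random.random((224, 224, 3)) * 255
        x_bright = random_brightness(x, (0.8, 1.2))
        ```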
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
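    """Shifts a homogeneous transform matrix so it is applied about the image center.
    # Arguments
        matrix: 3x3 homogeneous transform matrix.
        x: Height of the image (number of rows).
        y: Width of the image (number of columns).
    # Returns
        The 3x3 transform matrix, offset to the image center.
    """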
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D numpy array, single image (including the channel axis).
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
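    # Example
        A minimal sketch (illustrative values) composing a fixed 30-degree
        rotation with the center offset, for a channels-first image:
        ```python
        theta = np.deg2rad(30)
        rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                             [np.sin(theta), np.cos(theta), 0],
                             [0, 0, 1]])
        x = np.random.random((3, 224, 224))
        matrix = transform_matrix_offset_center(rotation, 224, 224)
        x_rotated = apply_transform(x, matrix, channel_axis=0)
        ```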
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
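    """Flips a Numpy array along the given axis.
    # Arguments
        x: Numpy array (or array-like).
        axis: Index of the axis to flip.
    # Returns
        The flipped array.
    """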
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
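    # Example
        A minimal round-trip sketch (shape is illustrative); requires PIL:
        ```python
        x = np.random.random((100, 100, 3)) * 255
        img = array_to_img(x)    # Numpy array -> PIL Image
        x2 = img_to_array(img)   # PIL Image -> (100, 100, 3) float array
        ```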
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
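    # Example
        A minimal sketch (array values and file name are illustrative):
        ```python
        x = np.random.random((100, 100, 3)) * 255
        save_img('augmented_sample.png', x)
        ```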
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
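    # Example
        A minimal sketch (the file path is illustrative), resizing on load
        and converting to a Numpy array:
        ```python
        img = load_img('path/to/image.jpg', target_size=(224, 224))
        x = img_to_array(img)  # shape (224, 224, 3)
        ```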
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
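    """Lists all picture files under `directory`, recursively.
    # Arguments
        directory: Path of the directory to scan.
        ext: Allowed file extensions, as a regex alternation.
    # Returns
        List of paths of the matching files.
    """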
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
            (shear angle in counter-clockwise direction, in degrees).
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
            of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
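        # Example
            A minimal sketch (illustrative values); only sample-wise options
            are used here, so no prior call to `fit` is needed:
            ```python
            datagen = ImageDataGenerator(rescale=1. / 255,
                                         samplewise_center=True)
            x = np.random.random((64, 64, 3)) * 255
            x_std = datagen.standardize(x)
            ```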
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
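        # Example
            A minimal sketch (illustrative settings), assuming the default
            "channels_last" data format:
            ```python
            datagen = ImageDataGenerator(rotation_range=20,
                                         horizontal_flip=True)
            x = np.random.random((64, 64, 3))
            x_aug = datagen.random_transform(x, seed=42)
            ```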
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
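        # Example
            A minimal sketch (array shape is illustrative):
            ```python
            datagen = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)
            x_train = np.random.random((100, 32, 32, 3))
            datagen.fit(x_train)  # computes `datagen.mean` and `datagen.std`
            ```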
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
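    # Example
        A minimal sketch (the directory path is illustrative):
        ```python
        for root, fname in _iter_valid_files('/data/images',
                                             {'png', 'jpg', 'jpeg'},
                                             follow_links=False):
            print(os.path.join(root, fname))
        ```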
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                # Warn once per file, not once per candidate extension.
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
x = imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
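    """Flips a Numpy array along the given axis.
    # Arguments
        x: Numpy array (or array-like) to flip.
        axis: Integer, index of the axis along which to flip.
    # Returns
        The flipped Numpy array.
    """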
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
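    # Example
        A minimal usage sketch with a dummy array (requires PIL):
        ```python
        import numpy as np
        arr = np.random.rand(64, 64, 3) * 255
        img = array_to_img(arr, data_format='channels_last')
        ```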
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
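    # Example
        A minimal usage sketch (`photo.jpg` is a hypothetical file path):
        ```python
        img = load_img('photo.jpg', target_size=(224, 224))
        x = img_to_array(img)  # shape (224, 224, 3) with "channels_last"
        ```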
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
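    # Example
        A minimal usage sketch (`augmented.png` is a hypothetical output path;
        requires PIL):
        ```python
        import numpy as np
        x = np.random.rand(64, 64, 3) * 255
        save_img('augmented.png', x)
        ```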
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
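    # Example
        A minimal usage sketch (`photo.jpg` is a hypothetical file path):
        ```python
        img = load_img('photo.jpg', target_size=(150, 150),
                       interpolation='bilinear')
        ```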
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
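    """Lists the paths of the pictures contained under `directory`.
    The directory is searched recursively, and only filenames with an
    extension matching `ext` are returned.
    # Arguments
        directory: Path to the directory to search.
        ext: Allowed extensions, as a regular expression alternation.
    # Returns
        A list of picture file paths.
    """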
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
            (shear angle in counter-clockwise direction in degrees).
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
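        # Example
            A minimal sketch of combining `validation_split` with `subset`
            (the directory path is hypothetical):
            ```python
            datagen = ImageDataGenerator(rescale=1. / 255,
                                         validation_split=0.2)
            train_generator = datagen.flow_from_directory(
                'data/images', target_size=(150, 150), subset='training')
            validation_generator = datagen.flow_from_directory(
                'data/images', target_size=(150, 150), subset='validation')
            ```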
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
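        # Example
            A minimal sketch on a single dummy image; note that `standardize`
            modifies its input in place, so pass a copy if the original array
            is still needed:
            ```python
            import numpy as np
            datagen = ImageDataGenerator(rescale=1. / 255,
                                         samplewise_center=True)
            x = np.random.rand(256, 256, 3) * 255
            x = datagen.standardize(np.copy(x))
            ```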
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
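        # Example
            A minimal sketch on a single dummy image (assumes the default
            "channels_last" data format):
            ```python
            import numpy as np
            datagen = ImageDataGenerator(rotation_range=30,
                                         horizontal_flip=True)
            x = np.random.rand(256, 256, 3)
            x_augmented = datagen.random_transform(x)
            ```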
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
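    # Example
        A minimal sketch (the directory path is hypothetical):
        ```python
        for root, fname in _iter_valid_files('/data/train/cats',
                                             {'png', 'jpg', 'jpeg'},
                                             follow_links=False):
            print(os.path.join(root, fname))
        ```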
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
                                    white_list_formats,
                                    split,
                                    follow_links):
    """Counts files with extension in `white_list_formats` contained in `directory`.
    # Arguments
        directory: absolute path to the directory
            containing files to be counted
        white_list_formats: set of strings containing allowed extensions for
            the files to be counted.
        split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
            account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40
            percent of images in each directory.
        follow_links: boolean.
    # Returns
        the count of files with extension in `white_list_formats` contained in
        the directory.
    """
    num_files = len(list(
        _iter_valid_files(directory, white_list_formats, follow_links)))
    if split:
        start, stop = int(split[0] * num_files), int(split[1] * num_files)
    else:
        start, stop = 0, num_files
    return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
x = imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
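    """Lists the paths of the pictures found under `directory`.
    The directory is walked recursively and a file is kept when its
    name matches one of the extensions in `ext`.
    # Arguments
        directory: Path to the directory to scan.
        ext: Accepted extensions, given as a regular expression
            alternation (default: 'jpg|jpeg|bmp|png|ppm').
    # Returns
        A list of file paths.
    """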
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
            (shear angle in counter-clockwise direction, in degrees).
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
            of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
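        # Example
        A minimal sketch, normalizing a single image the same way the
        iterators do internally (the random image is purely illustrative):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(samplewise_center=True,
                                     samplewise_std_normalization=True)
        x = np.random.rand(64, 64, 3).astype('float32')
        x = datagen.standardize(x)  # ~zero mean, ~unit std
        ```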
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
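        # Example
        A usage sketch (the random image is purely illustrative):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(rotation_range=30,
                                     horizontal_flip=True)
        x = np.random.rand(64, 64, 3)
        x_aug = datagen.random_transform(x, seed=42)
        assert x_aug.shape == x.shape
        ```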
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
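    # Example
    A minimal subclass sketch (the toy data and identity "transformation"
    are illustrative only):
    ```python
    import numpy as np
    class ArrayIterator(Iterator):
        def __init__(self, data, batch_size=32, shuffle=False, seed=None):
            self.data = data
            super(ArrayIterator, self).__init__(
                len(data), batch_size, shuffle, seed)
        def _get_batches_of_transformed_samples(self, index_array):
            return self.data[index_array]
    it = ArrayIterator(np.arange(100), batch_size=10)
    first_batch = it[0]  # indexable like any Keras `Sequence`
    ```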
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
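    # Example
    A usage sketch (the directory path is hypothetical):
    ```python
    import os
    for root, fname in _iter_valid_files('data/train/cats',
                                         {'png', 'jpg', 'jpeg'},
                                         follow_links=False):
        print(os.path.join(root, fname))
    ```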
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
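    # Example
    A sketch of the `split` semantics (the path is hypothetical): with 100
    valid files, `split=(0.6, 1.0)` yields a count of 40.
    ```python
    n_val = _count_valid_files_in_directory('data/train/cats',
                                            {'jpg', 'png'},
                                            split=(0.6, 1.0),
                                            follow_links=False)
    ```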
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
standardize
|
Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
x = imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be implied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
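    Example of using `validation_split` to reserve part of the data,
    a minimal sketch assuming `x_train`, `y_train`, `model` and `epochs`
    are defined as in the examples above:
    ```python
    datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
    train_iter = datagen.flow(x_train, y_train, batch_size=32,
                              subset='training')
    val_iter = datagen.flow(x_train, y_train, batch_size=32,
                            subset='validation')
    model.fit_generator(train_iter,
                        steps_per_epoch=len(train_iter),
                        validation_data=val_iter,
                        validation_steps=len(val_iter),
                        epochs=epochs)
    ```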
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
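        # Example
        A minimal sketch of passing extra, non-image inputs alongside the
        images via a tuple (`datagen` is an `ImageDataGenerator` instance;
        `x_images`, `x_extra` and `y` are assumed arrays of equal length):
        ```python
        gen = datagen.flow((x_images, x_extra), y, batch_size=32)
        x_batch, y_batch = next(gen)
        # x_batch is a list: [augmented images, unmodified extra inputs]
        ```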
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
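        # Example
        A minimal sketch of splitting a single directory into training and
        validation subsets (the path is illustrative):
        ```python
        datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
        train_gen = datagen.flow_from_directory('data/images',
                                                target_size=(150, 150),
                                                subset='training')
        val_gen = datagen.flow_from_directory('data/images',
                                              target_size=(150, 150),
                                              subset='validation')
        ```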
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
# MASKED: standardize function (lines 876-921)
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
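        # Example
        A minimal sketch (assumes `x` is a single image as a 3D Numpy
        array matching `data_format`):
        ```python
        datagen = ImageDataGenerator(rotation_range=30,
                                     horizontal_flip=True)
        x_aug = datagen.random_transform(x, seed=42)
        assert x_aug.shape == x.shape
        ```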
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
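        # Example
        A minimal sketch (assumes `x_train` is a rank-4 array of images):
        ```python
        datagen = ImageDataGenerator(zca_whitening=True)
        datagen.fit(x_train)  # computes mean and ZCA principal components
        gen = datagen.flow(x_train, batch_size=32)
        ```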
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
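    # Example
    A minimal sketch of a custom subclass (illustrative only):
    ```python
    class RangeIterator(Iterator):
        def __init__(self, n, batch_size=8):
            self.data = np.arange(n)
            super(RangeIterator, self).__init__(n, batch_size, True, None)

        def _get_batches_of_transformed_samples(self, index_array):
            return self.data[index_array]

    batches = RangeIterator(100)
    first_batch = batches[0]  # array of 8 shuffled values
    ```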
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
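    # Example
    A minimal sketch of direct use (normally created via
    `ImageDataGenerator.flow`; `x_train` and `y_train` are assumed to be
    rank-4 images and matching labels):
    ```python
    datagen = ImageDataGenerator(horizontal_flip=True)
    it = NumpyArrayIterator(x_train, y_train, datagen, batch_size=32)
    x_batch, y_batch = next(it)
    ```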
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
| 876 | 921 |
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_brightness = ImageEnhance.Brightness(x)
    u = np.random.uniform(brightness_range[0], brightness_range[1])
    x = imgenhancer_brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
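    # Shift the transform so it is applied about the image centre:
    # translate coordinates to the centre, apply `matrix`, then
    # translate back.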
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
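    # Example
    A minimal sketch combining the helpers above (assumes `x` is a single
    3D image in "channels_last" format):
    ```python
    theta = np.deg2rad(15)
    rot = np.array([[np.cos(theta), -np.sin(theta), 0],
                    [np.sin(theta), np.cos(theta), 0],
                    [0, 0, 1]])
    h, w = x.shape[0], x.shape[1]
    rot = transform_matrix_offset_center(rot, h, w)
    x_rot = apply_transform(x, rot, channel_axis=2)
    ```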
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
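    # Example
    A minimal sketch (assumes `x` is a 3D image array; the path is
    illustrative):
    ```python
    save_img('augmented/sample_0.png', x)
    ```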
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
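    # Example
    A minimal sketch (the path is illustrative):
    ```python
    img = load_img('data/train/cats/cat001.jpg',
                   target_size=(224, 224),
                   interpolation='bilinear')
    x = img_to_array(img)  # shape (224, 224, 3) for "channels_last"
    ```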
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
            (shear angle in counter-clockwise direction, in degrees).
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: Function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
                              '`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
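        # Example
        A minimal sketch of the tuple form of `x`; `x_train`, `x_extra`
        and `y_train` are assumed numpy arrays provided by the caller,
        and `model` is an assumed compiled Keras model with two inputs.
        ```python
        datagen = ImageDataGenerator(rotation_range=10,
                                     horizontal_flip=True)
        # first tuple element: the images; second: passed through unchanged
        generator = datagen.flow((x_train, x_extra), y_train,
                                 batch_size=32, seed=42)
        model.fit_generator(generator,
                            steps_per_epoch=len(x_train) // 32,
                            epochs=10)
        ```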
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
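        # Example
        A minimal sketch of using `subset` together with
        `validation_split`; the `data/train` path and `model` are
        assumptions made for illustration only.
        ```python
        datagen = ImageDataGenerator(rescale=1./255,
                                     validation_split=0.2)
        train_generator = datagen.flow_from_directory(
            'data/train',
            target_size=(150, 150),
            batch_size=32,
            class_mode='binary',
            subset='training')
        validation_generator = datagen.flow_from_directory(
            'data/train',
            target_size=(150, 150),
            batch_size=32,
            class_mode='binary',
            subset='validation')
        model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples // 32,
            validation_data=validation_generator,
            validation_steps=validation_generator.samples // 32,
            epochs=10)
        ```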
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
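        # Example
        A minimal sketch, assuming `x_train` is an existing rank-4 numpy
        array of images. Featurewise statistics must be computed with
        `fit` first; the batch is cast to float32 (which also makes a
        copy), since standardization is applied in place.
        ```python
        datagen = ImageDataGenerator(featurewise_center=True,
                                     featurewise_std_normalization=True)
        datagen.fit(x_train)
        x_batch = datagen.standardize(x_train[:32].astype('float32'))
        ```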
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
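        # Example
        A minimal sketch, assuming `x_train` is an existing rank-4 numpy
        array, so that `x_train[0]` is a single 3D image tensor.
        ```python
        datagen = ImageDataGenerator(rotation_range=20,
                                     horizontal_flip=True)
        augmented = datagen.random_transform(x_train[0], seed=1)
        assert augmented.shape == x_train[0].shape
        ```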
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
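        # Example
        A minimal sketch, assuming `x_train` is an existing rank-4 numpy
        array; the statistics computed here (the ZCA principal
        components) are later applied by `standardize`, and hence by
        the iterators returned from `flow`.
        ```python
        datagen = ImageDataGenerator(zca_whitening=True)
        datagen.fit(x_train, augment=True, rounds=2, seed=0)
        batches = datagen.flow(x_train, batch_size=32)
        ```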
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
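    # Example
    A minimal sketch of a custom subclass; the in-memory numpy array
    used as the data source here is an assumption for illustration.
    ```python
    class ArrayIterator(Iterator):
        def __init__(self, data, batch_size=32, shuffle=True, seed=None):
            self.data = data
            super(ArrayIterator, self).__init__(len(data), batch_size,
                                                shuffle, seed)

        def _get_batches_of_transformed_samples(self, index_array):
            # no augmentation here, just fancy indexing into the array
            return self.data[index_array]
    ```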
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
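    # Example
    Usually created indirectly via `ImageDataGenerator.flow`; a direct
    construction is sketched below with assumed arrays `x_train` and
    `y_train`.
    ```python
    datagen = ImageDataGenerator(horizontal_flip=True)
    iterator = NumpyArrayIterator(x_train, y_train, datagen,
                                  batch_size=32, shuffle=True, seed=1)
    x_batch, y_batch = next(iterator)
    ```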
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
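    # Example
    A minimal sketch; the directory path is an assumption.
    ```python
    formats = {'png', 'jpg', 'jpeg'}
    for root, fname in _iter_valid_files('data/train/cats', formats,
                                         follow_links=False):
        print(os.path.join(root, fname))
    ```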
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
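    # Example
    A minimal sketch; the directory path is an assumption. With
    `split=(0.6, 1.0)` only the last 40 percent of the valid files
    are counted.
    ```python
    n = _count_valid_files_in_directory('data/train/cats',
                                        {'png', 'jpg', 'jpeg'},
                                        split=(0.6, 1.0),
                                        follow_links=False)
    ```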
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
fit
|
Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
x = imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be implied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
            shuffle: Whether to shuffle the data (default: True).
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
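        # Example
        A minimal usage sketch (assuming a hypothetical `data/train`
        directory that contains one subdirectory per class):
        ```python
        from keras.preprocessing.image import ImageDataGenerator

        datagen = ImageDataGenerator(rescale=1. / 255)
        generator = datagen.flow_from_directory(
            'data/train',
            target_size=(150, 150),
            batch_size=32,
            class_mode='categorical')
        x_batch, y_batch = next(generator)
        # x_batch has shape (batch_size, 150, 150, 3) with "channels_last"
        ```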
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
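        # Example
        A small sketch (a random single image stands in for real data; the
        iterators in this module call `standardize` on one image at a time):
        ```python
        import numpy as np
        from keras.preprocessing.image import ImageDataGenerator

        datagen = ImageDataGenerator(samplewise_center=True,
                                     samplewise_std_normalization=True)
        img = np.random.random((64, 64, 3)).astype('float32')
        img_std = datagen.standardize(np.copy(img))  # copy: x is modified in place
        ```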
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
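        # Example
        A small sketch on a random `channels_last` image (illustrative
        data only):
        ```python
        import numpy as np
        from keras.preprocessing.image import ImageDataGenerator

        datagen = ImageDataGenerator(rotation_range=20, horizontal_flip=True)
        img = np.random.random((64, 64, 3))
        augmented = datagen.random_transform(img, seed=42)  # same shape as `img`
        ```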
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None):
        """Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
        Only required if `featurewise_center` or
        `featurewise_std_normalization` or `zca_whitening` are set to True.
        # Arguments
            x: Sample data. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Boolean (default: False).
                Whether to fit on randomly augmented samples.
            rounds: Int (default: 1).
                If using data augmentation (`augment=True`),
                this is how many augmentation passes over the data to use.
            seed: Int (default: None). Random seed.
        """
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' +
                self.data_format + '" (channels on axis ' +
                str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' +
                str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' +
                str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
                ' channels).')
        if seed is not None:
            np.random.seed(seed)
        x = np.copy(x)
        if augment:
            ax = np.zeros(
                tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + K.epsilon())
        if self.zca_whitening:
            flat_x = np.reshape(
                x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
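    # Example
    A minimal, hypothetical subclass (only meant to illustrate the contract;
    real subclasses return image batches):
    ```python
    import numpy as np

    class IndexBatchIterator(Iterator):
        def _get_batches_of_transformed_samples(self, index_array):
            # Trivial "transformation": return the indices themselves.
            return np.asarray(index_array, dtype='float32')

    it = IndexBatchIterator(n=10, batch_size=4, shuffle=False, seed=None)
    first_batch = it[0]  # array([0., 1., 2., 3.], dtype=float32)
    ```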
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
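    # Example
    A minimal sketch with random data (in practice this iterator is usually
    created via `ImageDataGenerator.flow`):
    ```python
    import numpy as np

    datagen = ImageDataGenerator(horizontal_flip=True)
    x = np.random.random((100, 32, 32, 3))
    y = np.random.randint(0, 10, size=(100,))
    it = NumpyArrayIterator(x, y, datagen, batch_size=20, shuffle=True, seed=1)
    x_batch, y_batch = next(it)  # x_batch.shape == (20, 32, 32, 3)
    ```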
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
x = imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be implied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
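        # Example
            A minimal sketch (the array is a random placeholder); `fit` is only
            needed when feature-wise statistics or ZCA whitening are requested:
                gen = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)
                x_train = np.random.random((100, 32, 32, 3))
                gen.fit(x_train)  # computes `gen.mean` and `gen.std`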
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
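    # Example
        A minimal sketch (data and labels are random placeholders); in practice
        this iterator is usually obtained via `ImageDataGenerator.flow`:
            x = np.random.random((10, 32, 32, 3))
            y = np.arange(10)
            it = NumpyArrayIterator(x, y, ImageDataGenerator(), batch_size=4)
            batch_x, batch_y = next(it)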
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
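    # Example
        A small sketch (the directory path is a placeholder):
            for root, fname in _iter_valid_files('/data/cats', {'jpg', 'png'},
                                                 follow_links=False):
                print(os.path.join(root, fname))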
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g. `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g. `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
reroot
|
Reroot to a new path, maintaining an input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index(...) later on, which gives you a reference to the
original proto.
"""
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
# MASKED: reroot function (lines 31-49)
def create_proto_index_field(root: expression.Expression,
new_field_name: path.Step
) -> expression.Expression:
return expression_add.add_paths(
root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})
class _RerootRootNodeTensor(prensor.RootNodeTensor):
"""The reroot root node.
This contains a map from a current index to the original index of a proto.
"""
def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):
super().__init__(size)
self._input_proto_index = input_proto_index
@property
def input_proto_index(self):
return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
return tf.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
if isinstance(node, _RerootRootNodeTensor):
return node.input_proto_index
return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
"""Reroot to a new path, maintaining a input proto index."""
def __init__(self, original_root: expression.Expression,
field_name: path.Step):
super().__init__(True, None)
self._field_name = field_name
self._original_root = original_root
self._new_root = original_root.get_child_or_error(field_name)
if self._new_root.type is not None:
raise ValueError("New root must be a message type: {}".format(
str(self._field_name)))
# TODO(martinz): Check that the "original root source expression" has a type
# in (_RerootExpression, prensor._ProtoRootExpression)
# To do this, we need a general technique similar to
# expression_add._is_true_source_expression: however, this should also cover
# intermediate operations like "project".
# Since this check is not present, if it should have fired, there will be
# an error when calculate(...) is called.
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._original_root, self._new_root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[old_root_value, new_root_value] = sources
if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
new_root_value, prensor.ChildNodeTensor):
old_input_proto_index = _get_input_proto_index(old_root_value)
# Notice that the "gather" operation is similar to promote.
return _RerootRootNodeTensor(
tf.size(new_root_value.parent_index, out_type=tf.int64),
tf.gather(old_input_proto_index, new_root_value.parent_index))
raise ValueError("Source types incorrect")
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
        # Although the path can vary, it is not used in the calculation;
        # any two _RerootExpression instances are treated as equal.
return isinstance(expr, _RerootExpression)
def _get_child_impl(self,
field_name: path.Step) -> Optional[expression.Expression]:
return self._new_root.get_child(field_name)
def known_field_names(self) -> FrozenSet[path.Step]:
return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
"""A proto index expression."""
def __init__(self, root: expression.Expression):
"""Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
"""
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[root_node] = sources
# The following check ensures not just that we can calculate the value,
# but that no "improper" reroots were done.
if isinstance(root_node, prensor.RootNodeTensor):
return prensor.LeafNodeTensor(
_get_proto_index_parent_index(root_node),
_get_input_proto_index(root_node),
is_repeated=False)
raise ValueError(
"Illegal operation: expected a true root node: got {}".format(
str(root_node)))
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
        # Although the path can vary, it is not used in the calculation;
        # any two _InputProtoIndexExpression instances are treated as equal.
return isinstance(expr, _InputProtoIndexExpression)
|
def reroot(root: expression.Expression,
source_path: path.Path) -> expression.Expression:
"""Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
"""
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
| 31 | 49 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index(...) later on, which gives you a reference to the
original proto.
"""
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
def reroot(root: expression.Expression,
source_path: path.Path) -> expression.Expression:
"""Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
"""
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
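# A hypothetical usage sketch (the root expression `expr` and the step name
# "event" are placeholders, not defined in this module): reroot to a repeated
# child and attach an index column mapping each new root back to its input
# proto.
#
#   rerooted = reroot(expr, path.Path(["event"]))
#   indexed = create_proto_index_field(rerooted, "proto_index")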
def create_proto_index_field(root: expression.Expression,
new_field_name: path.Step
) -> expression.Expression:
return expression_add.add_paths(
root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})
class _RerootRootNodeTensor(prensor.RootNodeTensor):
"""The reroot root node.
This contains a map from a current index to the original index of a proto.
"""
def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):
super().__init__(size)
self._input_proto_index = input_proto_index
@property
def input_proto_index(self):
return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
return tf.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
if isinstance(node, _RerootRootNodeTensor):
return node.input_proto_index
return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
"""Reroot to a new path, maintaining a input proto index."""
def __init__(self, original_root: expression.Expression,
field_name: path.Step):
super().__init__(True, None)
self._field_name = field_name
self._original_root = original_root
self._new_root = original_root.get_child_or_error(field_name)
if self._new_root.type is not None:
raise ValueError("New root must be a message type: {}".format(
str(self._field_name)))
# TODO(martinz): Check that the "original root source expression" has a type
# in (_RerootExpression, prensor._ProtoRootExpression)
# To do this, we need a general technique similar to
# expression_add._is_true_source_expression: however, this should also cover
# intermediate operations like "project".
# Since this check is not present, if it should have fired, there will be
# an error when calculate(...) is called.
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._original_root, self._new_root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[old_root_value, new_root_value] = sources
if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
new_root_value, prensor.ChildNodeTensor):
old_input_proto_index = _get_input_proto_index(old_root_value)
# Notice that the "gather" operation is similar to promote.
return _RerootRootNodeTensor(
tf.size(new_root_value.parent_index, out_type=tf.int64),
tf.gather(old_input_proto_index, new_root_value.parent_index))
raise ValueError("Source types incorrect")
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
        # Although the path can vary, it is not used in the calculation;
        # any two _RerootExpression instances are treated as equal.
return isinstance(expr, _RerootExpression)
def _get_child_impl(self,
field_name: path.Step) -> Optional[expression.Expression]:
return self._new_root.get_child(field_name)
def known_field_names(self) -> FrozenSet[path.Step]:
return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
"""A proto index expression."""
def __init__(self, root: expression.Expression):
"""Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
"""
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[root_node] = sources
# The following check ensures not just that we can calculate the value,
# but that no "improper" reroots were done.
if isinstance(root_node, prensor.RootNodeTensor):
return prensor.LeafNodeTensor(
_get_proto_index_parent_index(root_node),
_get_input_proto_index(root_node),
is_repeated=False)
raise ValueError(
"Illegal operation: expected a true root node: got {}".format(
str(root_node)))
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
        # Although the path can vary, it is not used in the calculation;
        # any two _InputProtoIndexExpression instances are treated as equal.
return isinstance(expr, _InputProtoIndexExpression)
|
has_wildcard
|
Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
|
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
# MASKED: has_wildcard function (lines 67-84)
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
            JSON response from the POST request to submit a workflow
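        Example:
            A hypothetical submission (file paths are placeholders):
                api = CromwellRestAPI()
                api.submit('workflow.wdl', inputs='inputs.json', on_hold=False)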
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
                List of workflow IDs used to find matching workflows.
            labels:
                List of Caper's string labels used to find matching workflows.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
        Wildcards (? and *) are allowed in both parameters, but then Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 error
        from the Cromwell server if there are many subworkflows and
        `exclude_subworkflow` is False.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
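        Example:
            A hypothetical query (IDs and label keys are placeholders):
                api = CromwellRestAPI(hostname='localhost', port=8000)
                api.find(workflow_ids=['caper-run-*'])         # wildcard match
                api.find(labels=[('my-label-key', 'my_run')])  # exact label match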
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
| 67 | 84 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
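# Illustrative behaviour of the helper above (inputs are made-up examples):
#
#   has_wildcard('caper-run-*')                            # True
#   has_wildcard(['abc', 'de?f'])                          # True
#   has_wildcard('f9aba280-1a9a-4616-a1f8-2c5ba2fc2b96')   # False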
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
            JSON response from the POST request to submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
                List of workflow IDs used to find matching workflows.
            labels:
                List of Caper's string labels used to find matching workflows.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
        Wildcards (? and *) are allowed in both parameters, but then Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 error from
        the Cromwell server if there are many subworkflows and `exclude_subworkflow`
        is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
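# --- Usage sketch (not part of the original module) ---
# A minimal example of constructing the client defined above and querying the
# server for its backends. It assumes a Caper/Cromwell server is already
# listening on localhost:8000 (the class defaults); adjust hostname/port as needed.
api = CromwellRestAPI(hostname='localhost', port=8000)
print(api.get_default_backend())                  # e.g. 'Local'
print(api.get_backends()['supportedBackends'])    # e.g. ['Local', 'gcp', 'slurm', ...]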
|
get_metadata
|
Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
        List of workflow IDs used to match workflows.
    labels:
        List of Caper's string labels used to match workflows.
    embed_subworkflow:
        Recursively embed each subworkflow's metadata in the main
        workflow's metadata.
        This flag mimics the behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
|
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
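# Illustrative sketch (not in the original module) of how the decorator above can
# wrap any function that talks to the server: a 404 is logged and the call returns
# None, while other HTTP errors and connection failures are re-raised with a more
# helpful message. The URL argument is an assumption for illustration only.
@requests_error_handler
def _example_get_json(url):
    # Any HTTPError/ConnectionError raised here is translated by the decorator.
    resp = requests.get(url, headers={'accept': 'application/json'})
    resp.raise_for_status()
    return resp.json()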
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
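# Illustrative sketch (not in the original module) of the expected behavior of the
# two helpers above; the UUID value is made up for the example.
def _example_uuid_and_wildcard_checks():
    assert is_valid_uuid('1f9e1c2a-3b4d-4e5f-8a6b-7c8d9e0f1a2b')
    assert not is_valid_uuid('1F9E1C2A-3B4D-4E5F-8A6B-7C8D9E0F1A2B')  # uppercase is rejected
    assert has_wildcard('atac-*')
    assert has_wildcard(['exact-label', 'run-?'])  # any element containing ? or * counts
    assert not has_wildcard(None)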
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
            JSON response from the POST request that submitted the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
# MASKED: get_metadata function (lines 227-261)
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
        Wildcards (? and *) are allowed in both parameters, but then Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 error from
        the Cromwell server if there are many subworkflows and `exclude_subworkflow`
        is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
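# --- Usage sketch (not part of the original module) ---
# Find workflows by an exact label pair and abort whatever matched. The label key
# and value are assumptions for illustration; any (key, value) tuple works.
api = CromwellRestAPI()
matched = api.find(labels=[('caper-str-label', 'my-run')])
print([workflow['id'] for workflow in matched])
api.abort(labels=[('caper-str-label', 'my-run')])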
|
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
                List of workflow IDs used to match workflows.
            labels:
                List of Caper's string labels used to match workflows.
            embed_subworkflow:
                Recursively embed each subworkflow's metadata in the main
                workflow's metadata.
                This flag mimics the behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
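# --- Usage sketch for the method above (not part of the original module) ---
# Fetch metadata for one workflow and write it to a file. The workflow UUID and
# output path are made up for illustration; a running server is assumed.
import json
api = CromwellRestAPI()
metadata_list = api.get_metadata(
    workflow_ids=['1f9e1c2a-3b4d-4e5f-8a6b-7c8d9e0f1a2b'],
    embed_subworkflow=True,  # like Cromwell run mode with -m: subworkflows embedded
)
if metadata_list:
    with open('metadata.json', 'w') as fp:
        json.dump(metadata_list[0], fp, indent=4)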
| 227 | 261 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
            JSON response from the POST request that submitted the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
                List of workflow IDs used to match workflows.
            labels:
                List of Caper's string labels used to match workflows.
            embed_subworkflow:
                Recursively embed each subworkflow's metadata in the main
                workflow's metadata.
                This flag mimics the behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
        Wildcards (? and *) are allowed in both parameters, but then Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 error from
        the Cromwell server if there are many subworkflows and `exclude_subworkflow`
        is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
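# --- Usage sketch (not part of the original module) ---
# Submit a WDL on hold, then release the hold. File paths are placeholders; the
# submit response is assumed to carry the new workflow's UUID under 'id'.
api = CromwellRestAPI()
resp = api.submit(
    source='/path/to/workflow.wdl',
    inputs='/path/to/inputs.json',
    on_hold=True,
)
workflow_id = resp['id']
api.release_hold(workflow_ids=[workflow_id])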
|
get_labels
|
Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
|
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
            JSON response from the POST request that submitted the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
                List of workflow IDs used to match workflows.
            labels:
                List of Caper's string labels used to match workflows.
            embed_subworkflow:
                Recursively embed each subworkflow's metadata in the main
                workflow's metadata.
                This flag mimics the behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
# MASKED: get_labels function (lines 263-277)
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
        Wildcards (? and *) are allowed in both parameters, but then Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 error from
        the Cromwell server if there are many subworkflows and `exclude_subworkflow`
        is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
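# --- Reference sketch (not part of the original module) ---
# The raw HTTP query that find_by_labels() above builds: label pairs are flattened
# to 'key:value' strings and passed via the 'labelor' parameter, so matching is OR
# across pairs. Hostname, port and the label value are assumptions.
import requests
resp = requests.get(
    'http://localhost:8000/api/workflows/v1/query',
    params={
        'additionalQueryResultFields': 'labels',
        'includeSubworkflows': False,
        'labelor': ['caper-str-label:my-run'],
    },
    headers={'accept': 'application/json'},
)
print(resp.json().get('results', []))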
|
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
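# --- Usage sketch for the labels helpers (not part of the original module) ---
# The UUID and label values are made up; the payload for update_labels() is passed
# straight through to requests.patch, so a JSON string is assumed here.
import json
api = CromwellRestAPI()
wf_id = '1f9e1c2a-3b4d-4e5f-8a6b-7c8d9e0f1a2b'
print(api.get_labels(wf_id))                    # full labels JSON, or None on 404
print(api.get_label(wf_id, 'caper-str-label'))  # one value, or None if the key is missing
api.update_labels(wf_id, json.dumps({'my-key': 'my-value'}))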
| 263 | 277 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
            JSON response from the POST request that submitted the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
                List of workflow IDs used to match workflows.
            labels:
                List of Caper's string labels used to match workflows.
            embed_subworkflow:
                Recursively embed each subworkflow's metadata in the main
                workflow's metadata.
                This flag mimics the behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
        Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
        Wildcards (? and *) are allowed in both parameters, but then Caper will
        retrieve a list of all workflows, which can lead to an HTTP 503 error from
        the Cromwell server if there are many subworkflows and `exclude_subworkflow`
        is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
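# --- Usage sketch (added for illustration; not part of the original Caper source) ---
# Assuming a Caper/Cromwell server is listening on localhost:8000 and the workflow
# UUID below is a hypothetical placeholder, workflow metadata (with subworkflows
# embedded, as Cromwell run mode with -m would produce) could be retrieved like this:
api = CromwellRestAPI(hostname='localhost', port=8000)
metadata_list = api.get_metadata(
workflow_ids=['f9ca62f8-2c04-4f49-8986-37ba4f7d2d0e'],  # placeholder lowercase UUID
embed_subworkflow=True,
)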
|
get_label
|
Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
|
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from POST request to submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
# MASKED: get_label function (lines 279-289)
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but then Caper will
retrieve a list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
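# Usage sketch (illustration only; not part of the original source). Assuming a
# Caper/Cromwell server on localhost:8000, a placeholder lowercase workflow UUID and
# a hypothetical label key, get_label() returns the stored value or None:
api = CromwellRestAPI(hostname='localhost', port=8000)
label_value = api.get_label(
'f9ca62f8-2c04-4f49-8986-37ba4f7d2d0e',  # placeholder workflow UUID
'my-label-key',  # hypothetical label key
)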
| 279 | 289 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
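# Illustration only (not part of the original file): a minimal sketch of decorating a
# hypothetical helper with requests_error_handler. A 404 response is logged and
# swallowed (the wrapper returns None); other HTTP errors and connection errors are
# re-raised with a friendlier message and without the nested traceback.
@requests_error_handler
def _example_ping(url):
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.json()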
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
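# Illustration only (not part of the original file): expected behavior of is_valid_uuid.
# Well-formed lowercase UUID strings pass; uppercase or malformed strings do not.
assert is_valid_uuid('f9ca62f8-2c04-4f49-8986-37ba4f7d2d0e')
assert not is_valid_uuid('F9CA62F8-2C04-4F49-8986-37BA4F7D2D0E')  # uppercase rejected
assert not is_valid_uuid('not-a-uuid')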
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
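# Illustration only (not part of the original file): has_wildcard on strings and lists.
assert has_wildcard('qc.sample-*')  # '*' counts as a wildcard
assert has_wildcard(['exact-id', 'run-?'])  # any element with '?' or '*' matches
assert not has_wildcard(['exact-id'])
assert not has_wildcard(None)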
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from POST request to submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but then Caper will
retrieve a list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
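# --- Usage sketch (added for illustration; not part of the original Caper source) ---
# A minimal end-to-end flow, assuming a Caper/Cromwell server on localhost:8000 and
# hypothetical local files 'hello.wdl' and 'hello.inputs.json':
api = CromwellRestAPI(hostname='localhost', port=8000)
submitted = api.submit(source='hello.wdl', inputs='hello.inputs.json')
# Cromwell's submit response includes the new workflow's UUID under 'id',
# which can then be used for exact-match queries.
workflows = api.find(workflow_ids=[submitted['id']])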
|
find_by_workflow_ids
|
Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
|
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from POST request to submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
# MASKED: find_by_workflow_ids function (lines 358-397)
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but then Caper will
retrieve a list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
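# Usage sketch (illustration only; not part of the original source). With a running
# Caper/Cromwell server on localhost:8000, an exact-match lookup by UUID might look like:
api = CromwellRestAPI(hostname='localhost', port=8000)
workflows = api.find_by_workflow_ids(
workflow_ids=['f9ca62f8-2c04-4f49-8986-37ba4f7d2d0e'],  # placeholder lowercase UUID
)
# Passing a pattern such as 'f9ca*' would raise ValueError here;
# use find_with_wildcard() for wildcard matching instead.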
| 358 | 397 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from POST request to submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but then Caper will
retrieve a list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is not set.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
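# --- Usage sketch (added for illustration; not part of the original Caper source) ---
# Finding workflows by an exact label (key, value) pair and aborting the matches,
# assuming a Caper/Cromwell server on localhost:8000 and a hypothetical label key:
api = CromwellRestAPI(hostname='localhost', port=8000)
matched = api.find(labels=[('my-label-key', 'my-test-run')])
aborted = api.abort(labels=[('my-label-key', 'my-test-run')])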
|
find_by_labels
|
Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
|
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
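# Illustrative behavior (the values below are examples, not from the source):
#   is_valid_uuid('9bca6842-6c77-4cf7-a42a-9e3a71a4a5ba')  -> True   (lowercase, parseable)
#   is_valid_uuid('9BCA6842-6C77-4CF7-A42A-9E3A71A4A5BA')  -> False  (uppercase rejected)
#   is_valid_uuid('not-a-uuid')                            -> False  (not parseable as a UUID)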
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
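# Illustrative behavior (the values below are examples, not from the source):
#   has_wildcard('2f8a*')                -> True   ('*' present)
#   has_wildcard(['abc', 'de?f'])        -> True   (one element contains '?')
#   has_wildcard(['plain-label-value'])  -> False
#   has_wildcard(None)                   -> False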
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from the POST request that submits the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflow IDs used to find matching workflows.
labels:
List of Caper's string labels used to find matching workflows.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
# MASKED: find_by_labels function (lines 399-439)
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but Caper will then
retrieve the list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is False.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
| 399 | 439 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from the POST request that submits the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflow IDs used to find matching workflows.
labels:
List of Caper's string labels used to find matching workflows.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but Caper will then
retrieve the list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is False.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
find
|
Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but Caper will then
retrieve the list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is False.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
|
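A hedged usage sketch of find (not taken from the source): the server location is the class default, the UUID and label values are placeholders, and both exact matching and the wildcard fallback are shown.
api = CromwellRestAPI()  # defaults: localhost:8000
my_uuid = '...'          # placeholder for a lowercase workflow UUID string
# Exact OR search over workflow IDs and label (key, value) pairs.
exact = api.find(workflow_ids=[my_uuid], labels=[('caper-str-label', 'my-workflow')])
# A wildcard in either parameter falls back to find_with_wildcard, which first
# retrieves the full workflow list from the server.
fuzzy = api.find(labels=[('caper-str-label', 'my-*')])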
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from the POST request that submits the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflow IDs used to find matching workflows.
labels:
List of Caper's string labels used to find matching workflows.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
# MASKED: find function (lines 441-493)
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but Caper will then
retrieve the list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is False.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
| 441 | 493 |
import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
"""Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
"""To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
"""
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON response from the POST request that submits the workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflow IDs used to find matching workflows.
labels:
List of Caper's string labels used to find matching workflows.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUIDs in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) are allowed in both parameters, but Caper will then
retrieve the list of all workflows, which can lead to an HTTP 503 error from the
Cromwell server if there are many subworkflows and `exclude_subworkflow` is False.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
|
__next__
|
This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done.
|
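A hedged sketch of how a caller might drain an iterator with this contract; CollisionTable and the wv/shape/scale arguments are borrowed from the fix() function in the file below, but the loop itself is illustrative and assumes the table object is consumed through its __next__ method.
table = CollisionTable(wv, shape, scale)   # constructed as in fix()
while True:
    hit = next(table)
    if hit is None:                        # per the docstring, None means iteration is done
        break
    for whisker, index in hit:             # each entry is a (Whisker_Seg, index) pair
        pass                               # resolve the colliding segment here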
"""
Author: Nathan Clack
Date : 2009
Copyright (c) 2009 HHMI. Free downloads and distribution are allowed for any
non-profit research and educational purposes as long as proper credit is given
to the author. All other rights reserved.
"""
from .tests import plot_whiskers
from ui.whiskerdata.trace import Whisker_Seg
from numpy import *
import pdb
from functools import reduce
def load():
from ui.whiskerdata import load_whiskers, load_trajectories
from ui.genetiff import Reader
movie = Reader('data/seq/whisker_data_0140.seq',adjuststipple=1)
w,wid = load_whiskers('seq.whiskers')
#movie = Reader('../../data/W0.tif',adjuststipple=1)
#w,wid = load_whiskers('w0-grid.whiskers')
#w,wid = load_whiskers('whisk-vc/whisk-vc/seq.whiskers')
#movie = Reader('data/JF8410_041808_001.tif',adjuststipple=1)
#w,wid = load_whiskers('test.whiskers')
#movie = Reader('data/lorenz/090519-19a_0035.seq',adjuststipple=1)
#w,wid = load_whiskers('lorenz.whiskers')
#w,wid = load_whiskers('results/seq-hand.whiskers')
#t,tid = load_trajectories('results/seq-hand.trajectories')
return w,movie
def check_bounds(wvd,shape):
for fid, wv in wvd.items():
for i,w in wv.items():
for x,y,t,s in w:
if x<0 or x>=shape[1] or y<0 or y>=shape[0]:
print("out of bounds")
pdb.set_trace()
if not ( w.x.flags.contiguous and w.y.flags.contiguous ):
print("not contiguous")
pdb.set_trace()
def fix(wvd,movie,scale=2, signal_per_pixel = 0, max_dist = 60, max_angle = 20.*pi/180.):
shape = movie[0].shape
for fid,wv in list(wvd.items()):
print(fid)
table = CollisionTable( wv, shape, scale )
r = set( resolution( table, wv ) )
for j,l in choose_gaps(movie[fid],r,signal_per_pixel,max_dist,max_angle):
e = reduce( Whisker_Seg.join, j )
r.discard( j[0] )
r.discard( j[-1] )
r.add(e)
wvd[fid] = dict( [ p for p in enumerate(r) ] )
return wvd
def compute_join_length( px, py, tlow = 0.0, thigh = 1.0 ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
xp2 = polymul( xp, xp )
yp2 = polymul( yp, yp )
p = polyadd( xp2, yp2 )
integrand = lambda t: sqrt( polyval( p, t ) )
return quad(integrand, tlow, thigh) [0]
def compute_join_curvature( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
xpp = polyder( px, 2 )
yp = polyder( py, 1 )
ypp = polyder( py, 2 )
pn = polysub( polymul( xp, ypp ), polymul( yp, xpp )) # numerator of curvature: x'*y'' - y'*x''
pd = polyadd( polymul( xp, xp ) , polymul( yp, yp ) ) #denominator
integrand = lambda t: fabs(polyval( pn, t )/( polyval( pd, t )**(1.5)) )
return quad(integrand, 0, 1) [0]
def compute_join_angle( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
integrand = lambda t: arctan2(polyval(yp, t), polyval(xp, t))
return quad(integrand, 0, 1) [0]
def _compute_intensity( im, x, y ):
if ( x<0 ).any() or \
( x>=im.shape[1] ).any() or \
( y<0 ).any() or \
( y>=im.shape[0] ).any():
return inf
p = set( p for p in zip(x,y) )
score = 0
for j,i in p:
score += im[i,j]
return score/len(p)
def compute_join_intensity( im, px, py ):
tt = linspace(0,1,50)
x = array( [round(polyval(px,t)) for t in tt] )
y = array( [round(polyval(py,t)) for t in tt] )
return _compute_intensity(im,x,y)
def compute_join_score( im, px, py, thick = 2 ):
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
a = _compute_intensity(im, ux, uy )
b = _compute_intensity(im, ux + thick*dy/dL , uy - thick*dx/dL )
c = _compute_intensity(im, ux - thick*dy/dL , uy + thick*dx/dL )
return (2*a - b - c)/4.0
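# Illustrative sketch (hypothetical synthetic image, not used by the module):
# the join score compares the mean intensity along the candidate curve with
# two flanking curves offset by `thick` pixels, so a dark whisker on a bright
# background scores negative, which is what choose_gaps() tests against
# -signal_per_pixel.
def _demo_join_score():
  im = ones((32, 64)) * 255.0
  im[16, :] = 0.0                    # dark one-pixel "whisker" along y = 16
  px = array([60.0, 2.0])            # x(t) = 60*t + 2
  py = array([0.0, 16.0])            # y(t) = 16
  return compute_join_score(im, px, py)  # negative (-127.5) for this image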
def solve_polynomial_join( left, right, reverse = 0):
"""
Solves for a parametric cubic polynomial curve joining the right side of left
  to the left side of right. The curve matches slope and position at its
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
This ends up being cheap to compute, since the matrix is
known (interval of parameter is always 0 to 1) and so the
inverse can be precomputed.
minv is inverse of m, where:
m = array( [ [ a**3, a**2, a, 1 ],
[ b**3, b**2, b, 1 ],
[ 3*a**2, 2*a , 1, 0 ],
[ 3*b**2, 2*b , 1, 0 ] ] )
is the matrix for the linear system:
m * coeff = v,
with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].
  Here a = 0 and b = 1, so m and its inverse are always the same.
"""
minv = matrix( [[ 2., -2., 1., 1.],
[-3., 3., -2., -1.],
[ 0., 0., 1., 0.],
[ 1., 0., 0., 0.]])
  # take care of cases joining very short segments
lr = len(right)
ll = len(left)
#L = length( right.x, right.y ) + length( left.x, left.y )
#dd = hypot( left.x[0] - right.x[-1], left.y[0] - right.y[-1] )
  nl = ll//4  # integer division: nl and nr are used as slice bounds below
  nr = lr//4
slope = lambda v: v[ 0] - v[-1] # want the total change over the length
#slope = lambda v: diff(v).mean()
  length = lambda x,y: hypot(diff(x),diff(y)).sum() # euclidean distance in pixels
#
# Compute slope at boundary.
# Uses a number of points near the boundary to compute slope.
# Need to account for edge cases where one or both sides
# consist of very few points.
#
if nr < 2 and nl < 2:
lnorm = length( left.x , left.y )
rnorm = length( right.x , right.y )
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nl = 0
nr = lr - 1
elif nr < 2: # use the derivative on the other side
lnorm = length( left.x[:nl], left.y[:nl] )
rnorm = length( right.x , right.y )
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nr = lr - 1
#print dly,dlx,dry,drx
elif nl < 2: # use the derivative on the other side
rnorm = length( right.x[:nr], right.y[:nr] )
lnorm = length( left.x , left.y )
dry = -slope(right.y[:nr] ) / rnorm
drx = -slope(right.x[:nr] ) / rnorm
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
nl = 0
else: # the "normal" case
rnorm = length( right.x[:nr], right.y[:nr] ) # Compute path length of right border region
lnorm = length( left.x[(-nl):], left.y[(-nl):] ) # Compute path length of left border region
dry = -slope(right.y[:nr] ) / rnorm # Compute dy/dl for right side
drx = -slope(right.x[:nr] ) / rnorm # etc...
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
rnorm = hypot( left.x[0] - right.x[0], left.y[0] - right.y[0] )
lnorm = hypot( left.x[-1]- right.x[0], left.y[-1]- right.y[0] )
if not isfinite(dlx): dlx =(left.x[0] - right.x[0])/lnorm
if not isfinite(dly): dly =(left.y[0] - right.y[0])/lnorm
if not isfinite(drx): drx =(left.x[-1] - right.x[0])/rnorm
if not isfinite(dry): dry =(left.y[-1] - right.y[0])/rnorm
if reverse:
dlx = -dlx
dly = -dly
drx = -drx
dry = -dry
ry = right.y[ 0] ## right.y[nr]
ly = left.y[-1 ] ## left.y[-nl]
rx = right.x[ 0] ## right.x[nr]
lx = left.x[-1 ] ## left.x[-nl]
L = hypot( rx-lx, ry-ly ) # Approximate dl/dt
print("L:%g"%L)
yv = matrix( [[ ly ],
[ ry ],
[ dly * L ], # dy/dt = dy/dl * dl/dt
[ dry * L ]])
xv = matrix( [[ lx ],
[ rx ],
[ dlx * L ],
[ drx * L ]])
cx = minv*xv
cy = minv*yv
if not (isfinite(cx).any() and isfinite(cy).any()):
pdb.set_trace()
return [array(t).squeeze() for t in (cx,cy)]
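# Illustrative sketch (self-check, not called anywhere): confirms that the
# hard-coded `minv` used above is the inverse of the Hermite system matrix m
# from the docstring for the parameter interval a = 0, b = 1.
def _check_hermite_inverse():
  a, b = 0.0, 1.0
  m = array([[   a**3,   a**2,  a, 1.],
             [   b**3,   b**2,  b, 1.],
             [3.*a**2,   2.*a, 1., 0.],
             [3.*b**2,   2.*b, 1., 0.]])
  minv = array([[ 2., -2.,  1.,  1.],
                [-3.,  3., -2., -1.],
                [ 0.,  0.,  1.,  0.],
                [ 1.,  0.,  0.,  0.]])
  assert allclose(dot(m, minv), eye(4))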
def plot_join(px,py,*args,**kwargs):
from pylab import plot, polyval
tt = linspace(0,1,50)
plot( polyval(px,tt), polyval(py,tt), *args, **kwargs )
def plot_test(px,py,thick=2):
from pylab import plot
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
plot( ux, uy , '.-')
plot( ux + thick*dy/dL , uy - thick*dx/dL ,'-')
plot( ux - thick*dy/dL , uy + thick*dx/dL ,'-' )
def filter_ends( wv, min_score, shape, border = 10 ):
"""
Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side).
"""
maxy, maxx = [x - border for x in shape]
minx, miny = border, border
test_point = lambda x,y: x>minx and x<maxx and y > miny and y < maxy
bordertest = lambda e,side: test_point( e.x[side], e.y[side] )
scoretest = lambda e,side: e.scores[side] > min_score
sides = [0,-1]
for e in wv:
for s in sides:
if bordertest(e,s) and scoretest(e,s):
yield e,s
def plot_candidate_ends(im, wv, min_score, border = 10):
from pylab import plot, imshow, cm, ion,ioff, show, text
left,right = group_ends( list(filter_ends(wv,min_score,im.shape, border)) )
ioff()
#imshow(im,cmap=cm.gray,hold=0)
m = {0:'ro',-1:'gs'}
for i,e in enumerate(left):
s = 0
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
for i,e in enumerate(right):
s = -1
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
show()
ion()
def group_ends( ends ):
return [e for e,s in ends if s == 0], [e for e,s in ends if s == -1]
def end_direction(w, side, n=16):
a = 0
b = min( n, len(w) )
if side != 0:
a = -b
b = -1
dx = diff( w.x[a:b] ).mean()
dy = diff( w.y[a:b] ).mean()
return dx,dy
def make_joining_whisker(px,py,dist,lthick,lscore,rthick,rscore):
w = Whisker_Seg()
tt = linspace(0,1,round(dist))
w.x = polyval(px,tt).astype(float32)
w.y = polyval(py,tt).astype(float32)
w.thick = polyval( [rthick-lthick,lthick], tt ).astype(float32)
w.scores = polyval( [rscore-lscore,lscore], tt ).astype(float32)
return w
def choose_gaps(im,wv, signal_per_pixel = 0.0, max_dist=60, max_angle = pi/4.):
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
theta = lambda w,side: reduce(arctan2, reversed( end_direction(w,side) ) )
dtheta = lambda left,right: fabs(theta(left,0) - theta(right,-1))
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[ 0]-b.x[-1]
dy = a.y[ 0]-b.y[-1]
d = hypot(dx,dy)
dth = dtheta(a,b)
v = end_direction(a,0)
norm = hypot(*v)
proj = dot( v/norm, (dx,dy) )
# jth: angle change from a to direct line joining a,b
jth = fabs(arctan2( hypot(*( dx-proj*v[0]/norm, dy-proj*v[1]/norm )) , proj ))
#print i,j,
#print "\tD: %g Proj: %g Theta: %g"%(d,proj,jth*180/pi)
l=0;
if d < max_dist and jth < max_angle and proj > 0:
px,py = solve_polynomial_join( b, a )
l = compute_join_score(im,px,py)
if l < -signal_per_pixel:
#plot_test(px,py)
print("\tScore: %g Theta: %g"%(l,jth*180/pi))
e = make_joining_whisker(px,py,d,b.thick[-1],b.scores[-1],a.thick[ 0],a.scores[ 0])
yield (b,e,a),l
def gap_measures(im,wv):
pmetric = lambda p: sqrt(dot(p[:-1],p[:-1]))
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
shape = (len(left),len(right) )
d = zeros( shape )
l = zeros( shape )
c = zeros( shape )
cx = zeros( shape )
cy = zeros( shape )
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[0 ]-b.x[-1]
dy = a.y[0 ]-b.y[-1]
d[i,j] = hypot(dx,dy)
px,py = solve_polynomial_join( b, a )
lpx,lpy = solve_polynomial_join( a, a, reverse = 1 )
rpx,rpy = solve_polynomial_join( b, b, reverse = 1 )
cx[i,j] = max( pmetric( px - lpx ) , pmetric( px - rpx ) )
cy[i,j] = max( pmetric( px - lpx ) , pmetric( py - rpy ) )
#l[i,j] = compute_join_length(px,py)
l[i,j] = compute_join_score(im,px,py)
plot_test(px,py)
#c[i,j] = compute_join_curvature(px,py)
#if sqrt( px[0]**2 + py[0]**2 ) < 50.0:
# plot_join(px,py)
return d,l,cx,cy
def trace_overlap(match_a, match_b, thresh = 2.0 ):
  # DONE: does not assume that indexes run along same direction
  (wa,i) = match_a
  (wb,j) = match_b
def dist(ia,ib):
a,b = wa[ia], wb[ib]
return hypot( a[0] - b[0], a[1] - b[1] )
# determine relative direction of indexing
ia,ib = i,j
if ia == len(wa)-1 or ib == len(wb)-1:
if ia != 0 and ib != 0:
dax = wa.x[ia-1] - wa.x[ia]
day = wa.y[ia-1] - wa.y[ia]
dbx = wb.x[ib-1] - wb.x[ib]
dby = wb.y[ib-1] - wb.y[ib]
elif ia == 0:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = - wb.x[ib-1] + wb.x[ib]
dby = - wb.y[ib-1] + wb.y[ib]
elif ib == 0:
dax = - wa.x[ia-1] + wa.x[ia]
day = - wa.y[ia-1] + wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
else:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
stepa = -1; #only need to keep track of one direction
enda = 0;
notend = lambda i,n: i>n
if( abs(dax) > abs(day) ): #determine by x change
if( dax*dbx < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
else: #determine by y change
if( day*dby < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
bnda = [i,i]
bndb = [j,j]
ms = 0
while ms < thresh and notend(ia,enda) and ib > 0:
moves = ( ( ia + stepa, ib - 1 ),
( ia + stepa, ib ),
( ia , ib - 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate( scores ): #choose best move
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == 0:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib - 1 )
while s < last and ib > 1:
ib -= 1
last = s
s = dist( ia, ib - 1 )
elif ib == 0:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[0] = ia
bndb[0] = ib
#flip direction
if stepa == -1:
stepa = 1
enda = len(wa)
notend = lambda i,n:i<n-1
else:
stepa = -1
enda = 0
notend = lambda i,n: i>n
ia,ib = i,j
ms = 0
while ms < thresh and notend(ia,enda) and ib < len(wb)-1:
moves = ( ( ia + stepa, ib + 1 ),
( ia + stepa, ib ),
( ia , ib + 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate(scores):
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == len(wb)-1:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib + 1 )
while s < last and ib < len(wb)-2:
ib += 1
last = s
s = dist( ia, ib + 1 )
elif ib == len(wb)-1:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[1] = ia
bndb[1] = ib
bnda.sort()
return bnda, bndb
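# Illustrative note: trace_overlap walks both whiskers away from the colliding
# sample (index i on wa, index j on wb) in lock step, in both directions, and
# returns the index ranges [bnda, bndb] over which the two traces stay within
# `thresh` pixels of each other; pairwise_merge() uses these bounds to decide
# whether one whisker is essentially contained in the other.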
def resolution(table, wvd):
rest = set(wvd.values())
match = next(table)
while match:
keep,discard = merge(match)
if discard:
for a in discard:
table.remove( a )
for a in keep:
yield a
for a,i in match:
rest.discard(a)
match = next(table)
for a in rest:
yield a
def pairwise_merge( match ):
overhang = 8
wa = match[0][0]
wb = match[1][0]
bnda, bndb = trace_overlap(*match)
iscomplete = lambda bnd,w: bnd[0] < overhang and bnd[1] >= len(w)-overhang
if iscomplete(bnda,wa) or iscomplete(bndb,wb):
sa = wa.scores.sum()
sb = wb.scores.sum()
if sa > sb:
return wa,None
else:
return None,wb
return None,None
def merge( match ):
dep = dict( [ (e[0],0) for e in match ] )
#iterate through all pairs and mark those who are contained in another whisker
# The pairwise merge should impose a strict ordering
match = list(match)
for i,ma in enumerate(match):
for j,mb in enumerate(match[ (i+1): ]):
ra,rb = pairwise_merge( (ma,mb) )
if ra or rb:
if not ra:
dep[ma[0]] = 1
if not rb:
dep[mb[0]] = 1
# partition into two sets. Those to keep and those to discard.
# Those to keep depend on none of the others.
return [ k for k,v in dep.items() if v==0 ], \
[ k for k,v in dep.items() if v!=0 ]
class CollisionTable(object):
def __init__(self, wvd, shape, scale):
""" `wvd` may be either a dict or list of whiskers """
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
    self._stride = stride = shape[1]//scale  # integer stride so pixel keys stay ints
self.topx = lambda p: int(p[0]/scale) + stride * int(p[1]/scale)
self._build_inverse_table( wvd )
def _build_inverse_table(self, wvd ):
g = enumerate(wvd)
if isinstance(wvd, dict):
g = iter(wvd.items())
for i,w in g:
self.add(w)
def update( self, changes ):
""" Changes is a dict mapping old whisker segments to new segments """
last = None
for w,p in changes.items():
self.remove(w)
if p:
self.add(p[0]) # add back ends
self.add(p[-1])
last = p[1]
if last:
self.add(last) # add back last middle
def add(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
self._map.setdefault(px,set()).add( (w,i) )
for i,px in hash(w): # scan back through and remove repeat hits on a pixel
for x in [e for e in self._map[px] if e[0] == w][1:]:
self._map[px].remove(x)
def remove(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
s = self._map.get(px)
if s:
s.discard( (w,i) )
def __iter__(self):
m = next(self)
while m:
yield m
m = next(self)
# MASKED: __next__ function (lines 590-607)
def counts( self ):
    tosc = lambda e: e//self._scale  # zeros() requires integer dimensions
im = zeros(list(map(tosc, self._shape)))
imr = im.ravel()
for px,s in self._map.items():
imr[px] = len(s) #len(set( [e for e,i in s] ))
return im
|
def __next__(self):
""" This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done.
"""
todelete = []
retval = None
for px,s in self._map.items():
todelete.append(px) # get rid of references to visited pixels
if len(s) > 1:
retval = s
break
for k in todelete:
del self._map[k]
return retval
| 590 | 607 |
"""
Author: Nathan Clack
Date : 2009
Copyright (c) 2009 HHMI. Free downloads and distribution are allowed for any
non-profit research and educational purposes as long as proper credit is given
to the author. All other rights reserved.
"""
from .tests import plot_whiskers
from ui.whiskerdata.trace import Whisker_Seg
from numpy import *
import pdb
from functools import reduce
def load():
from ui.whiskerdata import load_whiskers, load_trajectories
from ui.genetiff import Reader
movie = Reader('data/seq/whisker_data_0140.seq',adjuststipple=1)
w,wid = load_whiskers('seq.whiskers')
#movie = Reader('../../data/W0.tif',adjuststipple=1)
#w,wid = load_whiskers('w0-grid.whiskers')
#w,wid = load_whiskers('whisk-vc/whisk-vc/seq.whiskers')
#movie = Reader('data/JF8410_041808_001.tif',adjuststipple=1)
#w,wid = load_whiskers('test.whiskers')
#movie = Reader('data/lorenz/090519-19a_0035.seq',adjuststipple=1)
#w,wid = load_whiskers('lorenz.whiskers')
#w,wid = load_whiskers('results/seq-hand.whiskers')
#t,tid = load_trajectories('results/seq-hand.trajectories')
return w,movie
def check_bounds(wvd,shape):
for fid, wv in wvd.items():
for i,w in wv.items():
for x,y,t,s in w:
if x<0 or x>=shape[1] or y<0 or y>=shape[0]:
print("out of bounds")
pdb.set_trace()
if not ( w.x.flags.contiguous and w.y.flags.contiguous ):
print("not contiguous")
pdb.set_trace()
def fix(wvd,movie,scale=2, signal_per_pixel = 0, max_dist = 60, max_angle = 20.*pi/180.):
shape = movie[0].shape
for fid,wv in list(wvd.items()):
print(fid)
table = CollisionTable( wv, shape, scale )
r = set( resolution( table, wv ) )
for j,l in choose_gaps(movie[fid],r,signal_per_pixel,max_dist,max_angle):
e = reduce( Whisker_Seg.join, j )
r.discard( j[0] )
r.discard( j[-1] )
r.add(e)
wvd[fid] = dict( [ p for p in enumerate(r) ] )
return wvd
def compute_join_length( px, py, tlow = 0.0, thigh = 1.0 ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
xp2 = polymul( xp, xp )
yp2 = polymul( yp, yp )
p = polyadd( xp2, yp2 )
integrand = lambda t: sqrt( polyval( p, t ) )
return quad(integrand, tlow, thigh) [0]
def compute_join_curvature( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
xpp = polyder( px, 2 )
yp = polyder( py, 1 )
ypp = polyder( py, 2 )
  pn = polysub( polymul( xp, ypp ), polymul( yp, xpp )) #numerator: x'y'' - y'x''
pd = polyadd( polymul( xp, xp ) , polymul( yp, yp ) ) #denominator
integrand = lambda t: fabs(polyval( pn, t )/( polyval( pd, t )**(1.5)) )
return quad(integrand, 0, 1) [0]
def compute_join_angle( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
integrand = lambda t: arctan2(polyval(yp, t), polyval(xp, t))
return quad(integrand, 0, 1) [0]
def _compute_intensity( im, x, y ):
if ( x<0 ).any() or \
( x>=im.shape[1] ).any() or \
( y<0 ).any() or \
( y>=im.shape[0] ).any():
return inf
p = set( p for p in zip(x,y) )
score = 0
for j,i in p:
    score += im[int(i),int(j)]  # cast to int: float pixel indices are an error in modern numpy
return score/len(p)
def compute_join_intensity( im, px, py ):
tt = linspace(0,1,50)
x = array( [round(polyval(px,t)) for t in tt] )
  y = array( [round(polyval(py,t)) for t in tt] )
return _compute_intensity(im,x,y)
def compute_join_score( im, px, py, thick = 2 ):
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
a = _compute_intensity(im, ux, uy )
b = _compute_intensity(im, ux + thick*dy/dL , uy - thick*dx/dL )
c = _compute_intensity(im, ux - thick*dy/dL , uy + thick*dx/dL )
return (2*a - b - c)/4.0
def solve_polynomial_join( left, right, reverse = 0):
"""
Solves for a parametric cubic polynomial curve joining the right side of left
  to the left side of right. The curve matches slope and position at its
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
This ends up being cheap to compute, since the matrix is
known (interval of parameter is always 0 to 1) and so the
inverse can be precomputed.
minv is inverse of m, where:
m = array( [ [ a**3, a**2, a, 1 ],
[ b**3, b**2, b, 1 ],
[ 3*a**2, 2*a , 1, 0 ],
[ 3*b**2, 2*b , 1, 0 ] ] )
is the matrix for the linear system:
m * coeff = v,
with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].
  Here a = 0 and b = 1, so m and its inverse are always the same.
"""
minv = matrix( [[ 2., -2., 1., 1.],
[-3., 3., -2., -1.],
[ 0., 0., 1., 0.],
[ 1., 0., 0., 0.]])
  # take care of cases joining very short segments
lr = len(right)
ll = len(left)
#L = length( right.x, right.y ) + length( left.x, left.y )
#dd = hypot( left.x[0] - right.x[-1], left.y[0] - right.y[-1] )
  nl = ll//4  # integer division: nl and nr are used as slice bounds below
  nr = lr//4
slope = lambda v: v[ 0] - v[-1] # want the total change over the length
#slope = lambda v: diff(v).mean()
  length = lambda x,y: hypot(diff(x),diff(y)).sum() # euclidean distance in pixels
#
# Compute slope at boundary.
# Uses a number of points near the boundary to compute slope.
# Need to account for edge cases where one or both sides
# consist of very few points.
#
if nr < 2 and nl < 2:
lnorm = length( left.x , left.y )
rnorm = length( right.x , right.y )
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nl = 0
nr = lr - 1
elif nr < 2: # use the derivative on the other side
lnorm = length( left.x[:nl], left.y[:nl] )
rnorm = length( right.x , right.y )
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nr = lr - 1
#print dly,dlx,dry,drx
elif nl < 2: # use the derivative on the other side
rnorm = length( right.x[:nr], right.y[:nr] )
lnorm = length( left.x , left.y )
dry = -slope(right.y[:nr] ) / rnorm
drx = -slope(right.x[:nr] ) / rnorm
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
nl = 0
else: # the "normal" case
rnorm = length( right.x[:nr], right.y[:nr] ) # Compute path length of right border region
lnorm = length( left.x[(-nl):], left.y[(-nl):] ) # Compute path length of left border region
dry = -slope(right.y[:nr] ) / rnorm # Compute dy/dl for right side
drx = -slope(right.x[:nr] ) / rnorm # etc...
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
rnorm = hypot( left.x[0] - right.x[0], left.y[0] - right.y[0] )
lnorm = hypot( left.x[-1]- right.x[0], left.y[-1]- right.y[0] )
if not isfinite(dlx): dlx =(left.x[0] - right.x[0])/lnorm
if not isfinite(dly): dly =(left.y[0] - right.y[0])/lnorm
if not isfinite(drx): drx =(left.x[-1] - right.x[0])/rnorm
if not isfinite(dry): dry =(left.y[-1] - right.y[0])/rnorm
if reverse:
dlx = -dlx
dly = -dly
drx = -drx
dry = -dry
ry = right.y[ 0] ## right.y[nr]
ly = left.y[-1 ] ## left.y[-nl]
rx = right.x[ 0] ## right.x[nr]
lx = left.x[-1 ] ## left.x[-nl]
L = hypot( rx-lx, ry-ly ) # Approximate dl/dt
print("L:%g"%L)
yv = matrix( [[ ly ],
[ ry ],
[ dly * L ], # dy/dt = dy/dl * dl/dt
[ dry * L ]])
xv = matrix( [[ lx ],
[ rx ],
[ dlx * L ],
[ drx * L ]])
cx = minv*xv
cy = minv*yv
if not (isfinite(cx).any() and isfinite(cy).any()):
pdb.set_trace()
return [array(t).squeeze() for t in (cx,cy)]
def plot_join(px,py,*args,**kwargs):
from pylab import plot, polyval
tt = linspace(0,1,50)
plot( polyval(px,tt), polyval(py,tt), *args, **kwargs )
def plot_test(px,py,thick=2):
from pylab import plot
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
plot( ux, uy , '.-')
plot( ux + thick*dy/dL , uy - thick*dx/dL ,'-')
plot( ux - thick*dy/dL , uy + thick*dx/dL ,'-' )
def filter_ends( wv, min_score, shape, border = 10 ):
"""
Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side).
"""
maxy, maxx = [x - border for x in shape]
minx, miny = border, border
test_point = lambda x,y: x>minx and x<maxx and y > miny and y < maxy
bordertest = lambda e,side: test_point( e.x[side], e.y[side] )
scoretest = lambda e,side: e.scores[side] > min_score
sides = [0,-1]
for e in wv:
for s in sides:
if bordertest(e,s) and scoretest(e,s):
yield e,s
def plot_candidate_ends(im, wv, min_score, border = 10):
from pylab import plot, imshow, cm, ion,ioff, show, text
left,right = group_ends( list(filter_ends(wv,min_score,im.shape, border)) )
ioff()
#imshow(im,cmap=cm.gray,hold=0)
m = {0:'ro',-1:'gs'}
for i,e in enumerate(left):
s = 0
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
for i,e in enumerate(right):
s = -1
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
show()
ion()
def group_ends( ends ):
return [e for e,s in ends if s == 0], [e for e,s in ends if s == -1]
def end_direction(w, side, n=16):
a = 0
b = min( n, len(w) )
if side != 0:
a = -b
b = -1
dx = diff( w.x[a:b] ).mean()
dy = diff( w.y[a:b] ).mean()
return dx,dy
def make_joining_whisker(px,py,dist,lthick,lscore,rthick,rscore):
w = Whisker_Seg()
tt = linspace(0,1,round(dist))
w.x = polyval(px,tt).astype(float32)
w.y = polyval(py,tt).astype(float32)
w.thick = polyval( [rthick-lthick,lthick], tt ).astype(float32)
w.scores = polyval( [rscore-lscore,lscore], tt ).astype(float32)
return w
def choose_gaps(im,wv, signal_per_pixel = 0.0, max_dist=60, max_angle = pi/4.):
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
theta = lambda w,side: reduce(arctan2, reversed( end_direction(w,side) ) )
dtheta = lambda left,right: fabs(theta(left,0) - theta(right,-1))
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[ 0]-b.x[-1]
dy = a.y[ 0]-b.y[-1]
d = hypot(dx,dy)
dth = dtheta(a,b)
v = end_direction(a,0)
norm = hypot(*v)
proj = dot( v/norm, (dx,dy) )
# jth: angle change from a to direct line joining a,b
jth = fabs(arctan2( hypot(*( dx-proj*v[0]/norm, dy-proj*v[1]/norm )) , proj ))
#print i,j,
#print "\tD: %g Proj: %g Theta: %g"%(d,proj,jth*180/pi)
l=0;
if d < max_dist and jth < max_angle and proj > 0:
px,py = solve_polynomial_join( b, a )
l = compute_join_score(im,px,py)
if l < -signal_per_pixel:
#plot_test(px,py)
print("\tScore: %g Theta: %g"%(l,jth*180/pi))
e = make_joining_whisker(px,py,d,b.thick[-1],b.scores[-1],a.thick[ 0],a.scores[ 0])
yield (b,e,a),l
def gap_measures(im,wv):
pmetric = lambda p: sqrt(dot(p[:-1],p[:-1]))
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
shape = (len(left),len(right) )
d = zeros( shape )
l = zeros( shape )
c = zeros( shape )
cx = zeros( shape )
cy = zeros( shape )
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[0 ]-b.x[-1]
dy = a.y[0 ]-b.y[-1]
d[i,j] = hypot(dx,dy)
px,py = solve_polynomial_join( b, a )
lpx,lpy = solve_polynomial_join( a, a, reverse = 1 )
rpx,rpy = solve_polynomial_join( b, b, reverse = 1 )
cx[i,j] = max( pmetric( px - lpx ) , pmetric( px - rpx ) )
cy[i,j] = max( pmetric( px - lpx ) , pmetric( py - rpy ) )
#l[i,j] = compute_join_length(px,py)
l[i,j] = compute_join_score(im,px,py)
plot_test(px,py)
#c[i,j] = compute_join_curvature(px,py)
#if sqrt( px[0]**2 + py[0]**2 ) < 50.0:
# plot_join(px,py)
return d,l,cx,cy
def trace_overlap(match_a, match_b, thresh = 2.0 ):
  # DONE: does not assume that indexes run along same direction
  (wa,i) = match_a
  (wb,j) = match_b
def dist(ia,ib):
a,b = wa[ia], wb[ib]
return hypot( a[0] - b[0], a[1] - b[1] )
# determine relative direction of indexing
ia,ib = i,j
if ia == len(wa)-1 or ib == len(wb)-1:
if ia != 0 and ib != 0:
dax = wa.x[ia-1] - wa.x[ia]
day = wa.y[ia-1] - wa.y[ia]
dbx = wb.x[ib-1] - wb.x[ib]
dby = wb.y[ib-1] - wb.y[ib]
elif ia == 0:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = - wb.x[ib-1] + wb.x[ib]
dby = - wb.y[ib-1] + wb.y[ib]
elif ib == 0:
dax = - wa.x[ia-1] + wa.x[ia]
day = - wa.y[ia-1] + wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
else:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
stepa = -1; #only need to keep track of one direction
enda = 0;
notend = lambda i,n: i>n
if( abs(dax) > abs(day) ): #determine by x change
if( dax*dbx < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
else: #determine by y change
if( day*dby < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
bnda = [i,i]
bndb = [j,j]
ms = 0
while ms < thresh and notend(ia,enda) and ib > 0:
moves = ( ( ia + stepa, ib - 1 ),
( ia + stepa, ib ),
( ia , ib - 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate( scores ): #choose best move
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == 0:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib - 1 )
while s < last and ib > 1:
ib -= 1
last = s
s = dist( ia, ib - 1 )
elif ib == 0:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[0] = ia
bndb[0] = ib
#flip direction
if stepa == -1:
stepa = 1
enda = len(wa)
notend = lambda i,n:i<n-1
else:
stepa = -1
enda = 0
notend = lambda i,n: i>n
ia,ib = i,j
ms = 0
while ms < thresh and notend(ia,enda) and ib < len(wb)-1:
moves = ( ( ia + stepa, ib + 1 ),
( ia + stepa, ib ),
( ia , ib + 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate(scores):
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == len(wb)-1:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib + 1 )
while s < last and ib < len(wb)-2:
ib += 1
last = s
s = dist( ia, ib + 1 )
elif ib == len(wb)-1:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[1] = ia
bndb[1] = ib
bnda.sort()
return bnda, bndb
def resolution(table, wvd):
rest = set(wvd.values())
match = next(table)
while match:
keep,discard = merge(match)
if discard:
for a in discard:
table.remove( a )
for a in keep:
yield a
for a,i in match:
rest.discard(a)
match = next(table)
for a in rest:
yield a
def pairwise_merge( match ):
overhang = 8
wa = match[0][0]
wb = match[1][0]
bnda, bndb = trace_overlap(*match)
iscomplete = lambda bnd,w: bnd[0] < overhang and bnd[1] >= len(w)-overhang
if iscomplete(bnda,wa) or iscomplete(bndb,wb):
sa = wa.scores.sum()
sb = wb.scores.sum()
if sa > sb:
return wa,None
else:
return None,wb
return None,None
def merge( match ):
dep = dict( [ (e[0],0) for e in match ] )
#iterate through all pairs and mark those who are contained in another whisker
# The pairwise merge should impose a strict ordering
match = list(match)
for i,ma in enumerate(match):
for j,mb in enumerate(match[ (i+1): ]):
ra,rb = pairwise_merge( (ma,mb) )
if ra or rb:
if not ra:
dep[ma[0]] = 1
if not rb:
dep[mb[0]] = 1
# partition into two sets. Those to keep and those to discard.
# Those to keep depend on none of the others.
return [ k for k,v in dep.items() if v==0 ], \
[ k for k,v in dep.items() if v!=0 ]
class CollisionTable(object):
def __init__(self, wvd, shape, scale):
""" `wvd` may be either a dict or list of whiskers """
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
    self._stride = stride = shape[1]//scale  # integer stride so pixel keys stay ints
self.topx = lambda p: int(p[0]/scale) + stride * int(p[1]/scale)
self._build_inverse_table( wvd )
def _build_inverse_table(self, wvd ):
g = enumerate(wvd)
if isinstance(wvd, dict):
g = iter(wvd.items())
for i,w in g:
self.add(w)
def update( self, changes ):
""" Changes is a dict mapping old whisker segments to new segments """
last = None
for w,p in changes.items():
self.remove(w)
if p:
self.add(p[0]) # add back ends
self.add(p[-1])
last = p[1]
if last:
self.add(last) # add back last middle
def add(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
self._map.setdefault(px,set()).add( (w,i) )
for i,px in hash(w): # scan back through and remove repeat hits on a pixel
for x in [e for e in self._map[px] if e[0] == w][1:]:
self._map[px].remove(x)
def remove(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
s = self._map.get(px)
if s:
s.discard( (w,i) )
def __iter__(self):
m = next(self)
while m:
yield m
m = next(self)
def __next__(self):
""" This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done.
"""
todelete = []
retval = None
for px,s in self._map.items():
todelete.append(px) # get rid of references to visited pixels
if len(s) > 1:
retval = s
break
for k in todelete:
del self._map[k]
return retval
def counts( self ):
    tosc = lambda e: e//self._scale  # zeros() requires integer dimensions
im = zeros(list(map(tosc, self._shape)))
imr = im.ravel()
for px,s in self._map.items():
imr[px] = len(s) #len(set( [e for e,i in s] ))
return im
|
_build_labels_query
|
Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
# MASKED: _build_labels_query function (lines 138-177)
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
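    # Illustrative example (interval syntax inferred from the parsing above):
    # a chip value such as '2018-12-05T12:00:00 -5m +10m' splits into a base
    # timestamp, a minus offset and a plus offset, so _convert_to_time_range
    # would return ('2018-12-05T11:55:00', '2018-12-05T12:10:00').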
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
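    # Illustrative sketch (hypothetical chip values): a typical call made by
    # search() below, combining a free-text query with a label chip and a
    # term chip from the UI.
    #
    #   query_dsl = datastore.build_query(
    #       sketch_id=1,
    #       query_string='message:ssh',
    #       query_filter={'chips': [
    #           {'type': 'label', 'value': '__ts_star', 'active': True},
    #           {'type': 'term', 'field': 'user', 'value': 'root',
    #            'operator': 'must', 'active': True},
    #       ]},
    #   )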
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get the results back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
            self, index_name=None, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
        if index_name is None:
            # Generate a fresh UUID per call; a default argument expression is
            # evaluated only once, when the module is imported.
            index_name = uuid4().hex
        if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
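    # Illustrative sketch (hypothetical event): for a plain insert the queued
    # bulk body alternates action headers and documents, for example:
    #   {'index': {'_index': 'abc123'}}
    #   {'message': 'Failed login', 'datetime': '2018-12-05T00:00:00',
    #    '__ts_timeline_id': 7}
    # while updates use the 'update' header together with a {'doc': ...} or
    # {'script': ...} wrapper as built above.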
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
        Args:
            retry_count: Optional integer indicating how many retries have
                already been attempted.
        Returns:
            dict: A dict object that contains the number of events that were
                sent to Elastic, whether there were any errors, and the
                details of those errors, if any.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached the retry limit.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
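    # Illustrative sketch of a successful return value (added comment; the
    # numbers are hypothetical). The 'errors' and 'error_container' keys are
    # only added when the bulk response reports errors.
    #
    #   {'number_of_events': 500, 'total_events': 1500,
    #    'errors_in_upload': False}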
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
| 138 | 177 |
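A short illustration of the helper above (added for clarity; the sketch id and label value are hypothetical): for sketch_id=1 and labels=['suspicious'] the returned dictionary is a bool query containing one nested sub-query per label.

    expected = {
        'bool': {
            'must': [{
                'nested': {
                    'query': {
                        'bool': {
                            'must': [
                                {'term': {'timesketch_label.name.keyword': 'suspicious'}},
                                {'term': {'timesketch_label.sketch_id': 1}},
                            ]
                        }
                    },
                    'path': 'timesketch_label',
                }
            }]
        }
    }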
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
        # The start date could be made up of the first one or two items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
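    # Illustrative example for _convert_to_time_range above (added comment):
    # an interval such as '2018-12-05T12:00:00 -10m +10m' is parsed into
    # ('2018-12-05T11:50:00', '2018-12-05T12:10:00'), i.e. the start
    # timestamp widened by the requested number of minutes in each direction.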
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
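    # Example of the output shape (added comment; the query string is
    # hypothetical): build_query(sketch_id=1, query_string='foo',
    # query_filter={}) with no chips, DSL, aggregations or timeline ids
    # returns a plain bool query with the default ascending datetime sort:
    #
    #   {'query': {'bool': {'must': [{'query_string': {'query': 'foo'}}],
    #                       'must_not': [], 'filter': []}},
    #    'sort': {'datetime': 'asc'}}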
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
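    # Note (added comment): with single_update=False, set_label() returns the
    # painless script body (source/lang/params) instead of updating the
    # document in place. Because that dictionary carries a 'lang' key, it can
    # be queued via import_event() together with an event_id, which wraps it
    # as {'script': ...} for a bulk update.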
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
            'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
_build_events_query
|
Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
|
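A minimal sketch of an implementation that satisfies the docstring above, following the conventions used elsewhere in this module (collect the ids and wrap them in an Elasticsearch 'ids' query); treat it as illustrative rather than authoritative:

    @staticmethod
    def _build_events_query(events):
        """Build Elasticsearch query for one or more document ids."""
        # Pull the document ids out of the event dictionaries.
        events_list = [event['event_id'] for event in events]
        # An 'ids' query matches documents by their _id values.
        return {'query': {'ids': {'values': events_list}}}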
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
# MASKED: _build_events_query function (lines 179-191)
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
        # The start date could be made up of the first one or two items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
            'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
| 179 | 191 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
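# Example (hedged sketch, not from the original source): for a single label
# 'reviewed' and sketch_id 2 this yields roughly
#   {'bool': {'must': [{'nested': {'path': 'timesketch_label', 'query': {
#       'bool': {'must': [
#           {'term': {'timesketch_label.name.keyword': 'reviewed'}},
#           {'term': {'timesketch_label.sketch_id': 2}}]}}}}]}}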
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
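# Example (sketch): _build_events_query([{'event_id': 'abc'}, {'event_id': 'def'}])
# returns {'query': {'ids': {'values': ['abc', 'def']}}}; the ids are illustrative.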
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
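# Shape of the rewritten query (sketch): the original query is OR-ed between
# documents that have no __ts_timeline_id field (legacy indices) and documents
# whose __ts_timeline_id is in the supplied list, so both kinds of timelines
# keep matching the same user query.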
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
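# Example (sketch derived from the parsing above): the interval string
# '2018-12-05 5m 5m' splits into start '2018-12-05', minus 5 and plus 5 minutes,
# so the method returns ('2018-12-04T23:55:00', '2018-12-05T00:05:00').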
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument "_source_include" changed to "_source_includes" in
# ES version 7. This check adds support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
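# Usage sketch (assumed caller code, illustrative identifiers):
#   for event in datastore.search_stream(sketch_id=1, query_string='foo',
#                                        query_filter={}, indices=['abc123']):
#       print(event['_id'], event['_source'].get('message'))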
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
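# Example (sketch with made-up values): get_filter_labels(2, ['abc123']) could
# return ['reviewed', 'suspicious'], while internal labels such as '__ts_star'
# are filtered out by the startswith('__') check above.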
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
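# Example (sketch, numbers are illustrative): count(['abc123']) returns a tuple
# such as (120000, 52428800): the primary document count and size in bytes.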
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
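# Note (added comment): the index_name default above, uuid4().hex, is evaluated
# once when the method is defined, so every call that omits index_name within
# the same process reuses that single generated name.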
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached the retry count limit.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
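# Minimal usage sketch (assumes an active Flask application context, since the
# constructor reads current_app.config; index and event values are made up):
#   datastore = ElasticsearchDataStore(host='127.0.0.1', port=9200)
#   index_name, doc_type = datastore.create_index(index_name='sketch_1_demo')
#   datastore.import_event(index_name, doc_type,
#                          event={'message': 'hello',
#                                 'datetime': '2021-01-01T00:00:00'},
#                          timeline_id=1)
#   datastore.flush_queued_events()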
|
_build_query_dsl
|
Build Elasticsearch DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
# MASKED: _build_query_dsl function (lines 193-255)
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
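# Example (sketch): with no chips or pagination, build_query(1, 'foo', {})
# returns roughly
#   {'query': {'bool': {'must': [{'query_string': {'query': 'foo'}}],
#              'must_not': [], 'filter': []}},
#    'sort': {'datetime': 'asc'}}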
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument "_source_include" changed to "_source_includes" in
# ES version 7. This check adds support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
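# Sketch (assumed intended use): with single_update=False the returned dict
# (source/lang/params) can be queued through import_event() together with an
# event_id; import_event() then wraps it as {'script': ...} because 'lang' is set.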
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached the retry count limit.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
| 193 | 255 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be the first one or two items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
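# Illustrative only (inferred from the loop below, not from upstream docs):
# each chip is expected to look roughly like
#   {'type': 'term', 'field': 'message', 'value': 'evil.exe',
#    'operator': 'must', 'active': True}
# where 'type' can also be 'label', 'datetime_range' or 'datetime_interval'.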
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
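# Illustrative, not in the original: the queue now holds alternating
# action/source entries in the shape the Elasticsearch bulk API expects, e.g.
#   {'index': {'_index': 'abc123'}}
#   {'message': '...', 'datetime': '...', '__ts_timeline_id': 4}
# flush_queued_events() later sends this list as the bulk request body.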
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
_convert_to_time_range
|
Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
|
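A hedged worked example (the timestamp is made up; the parsing logic appears in the implementation at the end of this row): the interval string is a base timestamp followed by a minus and a plus offset in seconds, minutes, hours or days, e.g.
_convert_to_time_range('2018-12-05 12:00:00 -5m +5m')
# -> ('2018-12-05T11:55:00', '2018-12-05T12:05:00')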
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# MASKED: _convert_to_time_range function (lines 257-299)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be the first one or two items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
| 257 | 299 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
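        # A sketch of the expected input, inferred from the parsing below: a
        # start timestamp followed by a minus and a plus offset that share one
        # unit suffix (s, m, h or d), e.g. '2018-12-05T00:00:00 -5m +5m',
        # which yields ('2018-12-04T23:55:00', '2018-12-05T00:05:00').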
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
        # The start date could be made up of the first 1 or 2 items.
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
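        # Illustrative chip shapes, inferred from the handling below (the
        # example values are assumptions, not an authoritative schema):
        #   {'type': 'label', 'value': '__ts_star', 'active': True}
        #   {'type': 'term', 'field': 'message', 'value': 'evil.exe',
        #    'operator': 'must', 'active': True}
        #   {'type': 'datetime_range', 'active': True,
        #    'value': '2018-12-05T00:00:00,2018-12-05T23:59:59'}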
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
        # Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
        # ES version 7. This check adds support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get the results back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
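    # Minimal usage sketch for get_filter_labels(), assuming a Flask app
    # context with the ELASTIC_* settings configured and an existing index
    # (the sketch id and index name below are made up):
    #   datastore = ElasticsearchDataStore(host='127.0.0.1', port=9200)
    #   labels = datastore.get_filter_labels(sketch_id=1, indices=['abc123'])
    #   # e.g. ['reviewed', 'suspicious'] -- special '__' labels are skipped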
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
            self, index_name=None, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
        # Generate an index name if one was not provided.
        if not index_name:
            index_name = uuid4().hex
        if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
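            # The queued bulk body alternates header and document lines, for
            # example (values are illustrative only):
            #   {'index': {'_index': 'my_index'}}
            #   {'message': 'foo', 'datetime': '2018-12-05T00:00:00'}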
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
        Args:
            retry_count: Optional int indicating whether this is a retry.
        Returns:
            dict: A dict object that contains the number of events
                that were sent to Elastic as well as information
                on whether there were any errors, and what the
                details of these errors are, if any.
"""
if not self.import_events:
return {}
return_dict = {
            'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
get_filter_labels
|
Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
        # The start date could be made up of the first 1 or 2 items.
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
        # Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
        # ES version 7. This check adds support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get the results back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
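        # In 7.x the total is a dict such as {'value': 1500, 'relation': 'eq'};
        # in 6.x it is a plain integer (the numbers here are illustrative).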
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
# MASKED: get_filter_labels function (lines 651-717)
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
            self, index_name=None, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
        # Generate an index name if one was not provided.
        if not index_name:
            index_name = uuid4().hex
        if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
        Args:
            retry_count: Optional int indicating whether this is a retry.
        Returns:
            dict: A dict object that contains the number of events
                that were sent to Elastic as well as information
                on whether there were any errors, and what the
                details of these errors are, if any.
"""
if not self.import_events:
return {}
return_dict = {
            'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
| 651 | 717 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
                'All timeline IDs need to be integers.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
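        # The wrapper built below expresses, in pseudo-boolean form:
        # (old_query AND __ts_timeline_id NOT EXISTS) OR
        # (old_query AND __ts_timeline_id IN timeline_ids)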
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
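        # Illustrative input (assumed format): '2018-12-05T00:00:00 -5m +10m'
        # yields ('2018-12-04T23:55:00', '2018-12-05T00:10:00').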
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
        # The start date could be the first 1 or 2 items.
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
        # Add any pre-defined aggregations.
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
        # Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
        # The argument "_source_include" changed to "_source_includes" in
        # ES version 7. This check adds support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get results back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
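            # The bulk body alternates action headers and documents, e.g.
            # (illustrative): [{'index': {'_index': 'idx'}}, {...event...}],
            # which is why flush_queued_events() divides the queue length
            # by two to count events.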
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached the retry count maximum.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
count
|
Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
                'All timeline IDs need to be integers.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
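        # The wrapper built below expresses, in pseudo-boolean form:
        # (old_query AND __ts_timeline_id NOT EXISTS) OR
        # (old_query AND __ts_timeline_id IN timeline_ids)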
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
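        # Illustrative input (assumed format): '2018-12-05T00:00:00 -5m +10m'
        # yields ('2018-12-04T23:55:00', '2018-12-05T00:10:00').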
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
        # The start date could be the first 1 or 2 items.
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
        # Add any pre-defined aggregations.
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
        # Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
        # The argument "_source_include" changed to "_source_includes" in
        # ES version 7. This check adds support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
        search request on ElasticSearch and get results back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
# MASKED: count function (lines 754-786)
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
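            # The bulk body alternates action headers and documents, e.g.
            # (illustrative): [{'index': {'_index': 'idx'}}, {...event...}],
            # which is why flush_queued_events() divides the queue length
            # by two to count events.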
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached the retry count maximum.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
| 754 | 786 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
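# Worked example (added for illustration): an interval string such as
# '2018-12-05 00:00:00 -5m +5m' is split into a start timestamp plus two
# offsets, so the call returns ('2018-12-04T23:55:00', '2018-12-05T00:05:00').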
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on Elasticsearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on Elasticsearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
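# Note (added comment): the scroll loop above terminates once a scroll page
# comes back empty, because scroll_size is reset to the number of hits
# returned by each scroll call.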
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
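# Each queued event contributes two entries to the bulk body (an action
# header followed by the document itself), hence the division by two above.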
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached the maximum retry count.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
create_index
|
Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
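# In effect the rewritten query matches (old_query AND no __ts_timeline_id)
# OR (old_query AND __ts_timeline_id in timeline_ids), which keeps matching
# events from indices that never had the timeline id field set.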
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on Elasticsearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
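# The empty result above mirrors the shape of a regular Elasticsearch
# response so callers can handle it the same way as a real search result.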
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on Elasticsearch and get the results back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
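# Usage sketch (illustrative only, not part of the original module; the index
# name, event id and label below are made-up placeholder values):
#   datastore = ElasticsearchDataStore(host='127.0.0.1', port=9200)
#   datastore.set_label('my-index', 'event-id', 'generic_event',
#                       sketch_id=1, user_id=1, label='__ts_star')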
# MASKED: create_index function (lines 853-898)
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
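# Bulk 'update' actions need the document _id in the action header, and the
# body that follows is wrapped in either {'doc': ...} or {'script': ...}
# as done below.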
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached the maximum retry count.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
|
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
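# Usage sketch (illustrative, with made-up names): creating an index for a
# new timeline might look roughly like:
#   index_name, doc_type = datastore.create_index(index_name='my-timeline-index')
# where 'datastore' is an ElasticsearchDataStore instance and the default
# mapping above (nested timesketch_label, date-typed datetime) is applied.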
| 853 | 898 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
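# Example (added for illustration): events=[{'event_id': 'abc', 'index': 'idx'}]
# produces {'query': {'ids': {'values': ['abc']}}}.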
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
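# Worked example (illustrative): an interval value such as
# '2018-12-05T12:00:00 -5m +5m' is split into a start timestamp and two
# offsets, and returns ('2018-12-05T11:55:00', '2018-12-05T12:05:00').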
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
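# Illustrative only: for a plain query_string with no chips, events or
# timeline_ids, build_query() returns a DSL of roughly this shape:
# {'query': {'bool': {'must': [{'query_string': {'query': '...'}}],
#                     'must_not': [], 'filter': []}},
#  'sort': {'datetime': 'asc'}}
# ('from' and 'size' are only added when present in the query_filter.)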
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents match the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args :
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with the update script, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
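# Illustrative only: after queuing, self.import_events is a flat list of
# alternating action headers and documents, which is what client.bulk() expects:
# [{'index': {'_index': 'my-index'}}, {'message': '...', '__ts_timeline_id': 1},
#  {'update': {'_index': 'my-index', '_id': 'abc'}}, {'doc': {'message': '...'}}]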
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
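# Minimal usage sketch (illustrative, not part of Timesketch). The datastore reads
# its connection settings from the Flask current_app config, so this only works
# inside an application context; index and field names below are placeholders.
def _example_datastore_usage():
    datastore = ElasticsearchDataStore(host='127.0.0.1', port=9200)
    index_name, doc_type = datastore.create_index()
    for i in range(10):
        datastore.import_event(
            index_name, doc_type,
            event={'message': 'event {0:d}'.format(i),
                   'datetime': '2018-12-05T00:00:00'},
            timeline_id=1)
    # Flush whatever is still queued and report any bulk errors.
    results = datastore.flush_queued_events()
    return results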
|
lpp_averageIndivTransit
|
Loop over the individual transits and return an
array of normalized lpp values, plus their median and std.
Input is a TCE object and a mapInfo object.
It is unclear whether this individual transit approach
separates out several new false positives.
It probably would require retuning for low SNR signals.
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 20:32:12 2018
Functions to correctly fold and bin a light curve.
Calculate the lpp metric: transform to lower dimensions, knn
Depends on class from reading in a previously created LPP metric Map
Depends on reading in the light curve to data structure.
input is a class called data
data contains
data.time (days)
data.tzero (day)
data.dur (hours)
data.period (days)
data.flux (normalized to 0)
After foldBinLightCurve it contains
data.binned
After transform it contains
data.lpp_transform
@author: smullally
"""
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
from lpproj import LocalityPreservingProjection
import copy
def computeLPPTransitMetric(data,mapInfo):
"""
This function takes a data class with light curve info
and the mapInfo with information about the mapping to use.
It then returns a lpp metric value.
"""
binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)
#plt.figure()
#plt.plot(binPhase,binFlux,'.--')
#Dimensionality Reduction and knn parts
rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)
#Normalize by Period Dependence
normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)
return normTLpp,rawTLpp,transformedTransit
def runningMedian(t,y,dt,runt):
"""
Take a running median of size dt
Return values at times given in runt
"""
newy=np.zeros(len(y))
newt=np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy=[]
for i in range(len(runt)):
tmp=[]
for j in range(len(newt)):
if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))) :
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return(list(runt),runy)
def foldBinLightCurve (data, ntrfr, npts):
"""
Fold and bin light curve for input to LPP metric calculation
data contains time, tzero, dur, period, mes and flux (centered around zero)
ntrfr -- number of transit fraction for binning around transit ~1.5
npts -- number of points in the final binning.
"""
#Create phase light curve
phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)
flux=data.flux
mes=data.mes
#Determine the fraction of the time the planet transits the star.
#Insist that ntrfr * transit fraction
if ~np.isnan(data.dur) & (data.dur >0):
transit_dur = data.dur
else:
transit_dur = 0.2 * data.period/24.
transit_fr=transit_dur/24./data.period
if (transit_fr * ntrfr) > 0.5 :
transit_fr = 0.5/ntrfr
#Specify the out of transit (a) and the in transit regions
binover=1.3
if mes <= 20:
binover=-(1/8.0)*mes + 3.8
endfr = .03
midfr= .11
a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \
np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)
ovsamp=4.0
#bstep=(ovsamp*ntrfr*transit_fr)/npts
b_num=41
b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)
#print "length a: %u " % len(a)
#print "length b: %u" % len(b)
[runta,runya] = runningMedian(phaselc,flux,binover/npts,a)
[runtb,runyb] = runningMedian(phaselc,flux,\
(binover*ovsamp*ntrfr*transit_fr)/npts,b)
#Combine the two sets of bins
runymess=np.array(runya + runyb)
runtmess = np.array(runta + runtb)
srt=np.argsort(runtmess)
runy=runymess[srt]
runt=runtmess[srt]
#Scale the flux by the depth so everything has the same depth.
#If the depth is zero, do not scale (avoids dividing by zero).
scale = -1*np.min(runyb)
if scale != 0:
scaledFlux=runy/scale
else:
scaledFlux=runy
binnedFlux=scaledFlux
phasebins=runt
return binnedFlux,phasebins
def computeRawLPPTransitMetric(binFlux,mapInfo):
"""
Perform the matrix transformation with LPP
Do the knn test to get a raw LPP transit metric number.
"""
Yorig=mapInfo.YmapMapped
lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_=mapInfo.YmapM
#To equate to Matlab LPP methods, we need to remove mean of transform.
normBinFlux=binFlux-mapInfo.YmapMean
inputY=lpp.transform(normBinFlux.reshape(1,-1))
knownTransitsY=Yorig[mapInfo.knnGood,:]
dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)
rawLppTrMetric=np.mean(dist)
return rawLppTrMetric,inputY
def knnDistance_fromKnown(knownTransits,new,knn):
"""
For a group of known transits and a new one,
use knn to determine how close the new one is to the known transits,
using a Minkowski distance (p=2 below; the original Matlab code may have used p=3).
Uses sklearn NearestNeighbors to do this.
"""
#p=3 sets a minkowski distance of 3. #Check that you really used 3 for matlab.
nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
distances,indices = nbrs.kneighbors(new)
return distances, indices
def periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):
"""
Normalize the rawTransitMetric value by those with the closest period.
This part removes the period dependence of the metric at short periods.
Plus it makes a value near one be the threshold between good and bad.
newPerMes is the np.array([period, mes]) of the new sample
"""
knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]
nPercentil=mapInfo.nPercentil
nPsample=mapInfo.nPsample
#Find those with the nearest periods (nPsample nearest neighbors)
logPeriods=np.log10(knownTrPeriods)
logMes=np.log10(knownTrMes)
knownPerMes=np.stack((logPeriods, logMes), axis=-1)
np.shape(knownPerMes)
logNew=np.log10(newPerMes).reshape(1,-1)
#logNew=np.array([np.log10(newPeriod)]).reshape(1,1)
dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)
#Find the nthPercentile of the rawLpp of these indices
nearPeriodLpp=knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)
NormLppTransitMetric=rawTLpp/LppNPercentile
return NormLppTransitMetric
def lpp_onetransit(tcedata,mapInfo,ntransit):
"""
Chop down the full time series to one orbital period.
Then gather the lpp value for that one transit.
"""
startTime=tcedata.time[0]+ntransit*tcedata.period
endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0 #A few cadences of overlap
want=(tcedata.time>=startTime) & (tcedata.time<=endTime)
newtime=tcedata.time[want]
newflux=tcedata.flux[want]
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
if len(newtime>nExpCad*0.75):
onetransit=copy.deepcopy(tcedata)
onetransit.time=newtime
onetransit.flux=newflux
normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)
else:
normTLpp=np.nan
rawTLpp=np.nan
return normTLpp,rawTLpp
# MASKED: lpp_averageIndivTransit function (lines 259-287)
|
def lpp_averageIndivTransit(tcedata,mapInfo):
"""
Loop over the individual transits and return an
array of normalized lpp values, plus their median and std.
Input is a TCE object and a mapInfo object.
It is unclear whether this individual transit approach
separates out several new false positives.
It probably would require retuning for low SNR signals.
"""
length=tcedata.time[-1]-tcedata.time[0]
ntransits=int(np.floor(length/tcedata.period))
lppNorms=np.ones(ntransits)
lppRaws=np.ones(ntransits)
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
for i in range(ntransits):
lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)
lppMed=np.nanmedian(lppNorms)
lppStd=np.nanstd(lppNorms)
return lppNorms,lppMed, lppStd, ntransits
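# Minimal usage sketch (illustrative): `tce` is assumed to be a populated data
# object with time, tzero, dur, period, flux and mes attributes, and `map_info`
# a previously built LPP map object (both loaded elsewhere).
# norms, med, std, ntransits = lpp_averageIndivTransit(tce, map_info)
# print("median normalized LPP over %d transits: %.3f +/- %.3f" % (ntransits, med, std))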
| 259 | 287 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 20:32:12 2018
Functions to correctly fold and bin a light curve.
Calculate the lpp metric: transform to lower dimensions, knn
Depends on class from reading in a previously created LPP metric Map
Depends on reading in the light curve to data structure.
input is a class called data
data contains
data.time (days)
data.tzero (day)
data.dur (hours)
data.period (days)
data.flux (normalized to 0)
After foldBinLightCurve it contains
data.binned
After transform it contains
data.lpp_transform
@author: smullally
"""
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
from lpproj import LocalityPreservingProjection
import copy
def computeLPPTransitMetric(data,mapInfo):
"""
This function takes a data class with light curve info
and the mapInfo with information about the mapping to use.
It then returns a lpp metric value.
"""
binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)
#plt.figure()
#plt.plot(binPhase,binFlux,'.--')
#Dimensionality Reduction and knn parts
rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)
#Normalize by Period Dependence
normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)
return normTLpp,rawTLpp,transformedTransit
def runningMedian(t,y,dt,runt):
"""
Take a running median of size dt
Return values at times given in runt
"""
newy=np.zeros(len(y))
newt=np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy=[]
for i in range(len(runt)):
tmp=[]
for j in range(len(newt)):
if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))) :
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return(list(runt),runy)
def foldBinLightCurve (data, ntrfr, npts):
"""
Fold and bin light curve for input to LPP metric calculation
data contains time, tzero, dur, period, mes and flux (centered around zero)
ntrfr -- number of transit fraction for binning around transit ~1.5
npts -- number of points in the final binning.
"""
#Create phase light curve
phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)
flux=data.flux
mes=data.mes
#Determine the fraction of the time the planet transits the star.
#Insist that ntrfr * transit fraction
if ~np.isnan(data.dur) & (data.dur >0):
transit_dur = data.dur
else:
transit_dur = 0.2 * data.period/24.
transit_fr=transit_dur/24./data.period
if (transit_fr * ntrfr) > 0.5 :
transit_fr = 0.5/ntrfr
#Specify the out of transit (a) and the in transit regions
binover=1.3
if mes <= 20:
binover=-(1/8.0)*mes + 3.8
endfr = .03
midfr= .11
a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \
np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)
ovsamp=4.0
#bstep=(ovsamp*ntrfr*transit_fr)/npts
b_num=41
b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)
#print "length a: %u " % len(a)
#print "length b: %u" % len(b)
[runta,runya] = runningMedian(phaselc,flux,binover/npts,a)
[runtb,runyb] = runningMedian(phaselc,flux,\
(binover*ovsamp*ntrfr*transit_fr)/npts,b)
#Combine the two sets of bins
runymess=np.array(runya + runyb)
runtmess = np.array(runta + runtb)
srt=np.argsort(runtmess)
runy=runymess[srt]
runt=runtmess[srt]
#Scale the flux by the depth so everything has the same depth.
#If the depth is zero, do not scale (avoids dividing by zero).
scale = -1*np.min(runyb)
if scale != 0:
scaledFlux=runy/scale
else:
scaledFlux=runy
binnedFlux=scaledFlux
phasebins=runt
return binnedFlux,phasebins
def computeRawLPPTransitMetric(binFlux,mapInfo):
"""
Perform the matrix transformation with LPP
Do the knn test to get a raw LPP transit metric number.
"""
Yorig=mapInfo.YmapMapped
lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_=mapInfo.YmapM
#To equate to Matlab LPP methods, we need to remove mean of transform.
normBinFlux=binFlux-mapInfo.YmapMean
inputY=lpp.transform(normBinFlux.reshape(1,-1))
knownTransitsY=Yorig[mapInfo.knnGood,:]
dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)
rawLppTrMetric=np.mean(dist)
return rawLppTrMetric,inputY
def knnDistance_fromKnown(knownTransits,new,knn):
"""
For a group of known transits and a new one,
use knn to determine how close the new one is to the known transits,
using a Minkowski distance (p=2 below; the original Matlab code may have used p=3).
Uses sklearn NearestNeighbors to do this.
"""
#p=3 sets a minkowski distance of 3. #Check that you really used 3 for matlab.
nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
distances,indices = nbrs.kneighbors(new)
return distances, indices
def periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):
"""
Normalize the rawTransitMetric value by those with the closest period.
This part removes the period dependence of the metric at short periods.
Plus it makes a value near one be the threshold between good and bad.
newPerMes is the np.array([period, mes]) of the new sample
"""
knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]
nPercentil=mapInfo.nPercentil
nPsample=mapInfo.nPsample
#Find those with the nearest periods (nPsample nearest neighbors)
logPeriods=np.log10(knownTrPeriods)
logMes=np.log10(knownTrMes)
knownPerMes=np.stack((logPeriods, logMes), axis=-1)
np.shape(knownPerMes)
logNew=np.log10(newPerMes).reshape(1,-1)
#logNew=np.array([np.log10(newPeriod)]).reshape(1,1)
dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)
#Find the nthPercentile of the rawLpp of these indices
nearPeriodLpp=knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)
NormLppTransitMetric=rawTLpp/LppNPercentile
return NormLppTransitMetric
def lpp_onetransit(tcedata,mapInfo,ntransit):
"""
Chop down the full time series to one orbital period.
Then gather the lpp value for that one transit.
"""
startTime=tcedata.time[0]+ntransit*tcedata.period
endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0 #A few cadences of overlap
want=(tcedata.time>=startTime) & (tcedata.time<=endTime)
newtime=tcedata.time[want]
newflux=tcedata.flux[want]
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
if len(newtime>nExpCad*0.75):
onetransit=copy.deepcopy(tcedata)
onetransit.time=newtime
onetransit.flux=newflux
normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)
else:
normTLpp=np.nan
rawTLpp=np.nan
return normTLpp,rawTLpp
def lpp_averageIndivTransit(tcedata,mapInfo):
"""
Loop over the individual transits and return an
array of normalized lpp values, plus their median and std.
Input is a TCE object and a mapInfo object.
It is unclear whether this individual transit approach
separates out several new false positives.
It probably would require retuning for low SNR signals.
"""
length=tcedata.time[-1]-tcedata.time[0]
ntransits=int(np.floor(length/tcedata.period))
lppNorms=np.ones(ntransits)
lppRaws=np.ones(ntransits)
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
for i in range(ntransits):
lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)
lppMed=np.nanmedian(lppNorms)
lppStd=np.nanstd(lppNorms)
return lppNorms,lppMed, lppStd, ntransits
|
__init__
|
Args:
cfg (CfgNode):
vis_highest_scoring (bool): If set to True visualizes only
the highest scoring prediction
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.logger import setup_logger
from pytorch3d.io import save_obj
from pytorch3d.structures import Meshes
# required so that .register() calls are executed in module scope
import meshrcnn.data # noqa
import meshrcnn.modeling # noqa
import meshrcnn.utils # noqa
from meshrcnn.config import get_meshrcnn_cfg_defaults
from meshrcnn.evaluation import transform_meshes_to_camera_coord_system
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/pix3d/meshrcnn_R50_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input image")
parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
args = get_parser().parse_args()
from meshrcnn.data.datasets.register_pix3d import register_pix3d
register_pix3d(args.opts[1])
import cv2
logger = logging.getLogger("demo")
class VisualizationDemo(object):
# MASKED: __init__ function (lines 60-76)
def run_on_image(self, image, focal_length=10.0):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
focal_length (float): the focal_length of the image
Returns:
predictions (dict): the output of the model.
"""
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
# camera matrix
imsize = [image.shape[0], image.shape[1]]
# focal <- focal * image_width / 32
focal_length = image.shape[1] / 32 * focal_length
K = [focal_length, image.shape[1] / 2, image.shape[0] / 2]
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(
verts=[mesh[0] for mesh in instances.pred_meshes],
faces=[mesh[1] for mesh in instances.pred_meshes],
)
pred_dz = instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1])
tc = pred_dz.abs().max() + 1.0
zranges = torch.stack(
[
torch.stack(
[
tc - tc * pred_dz[i] / 2.0 / focal_length,
tc + tc * pred_dz[i] / 2.0 / focal_length,
]
)
for i in range(len(meshes))
],
dim=0,
)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(
meshes, boxes.tensor, zranges, Ks, imsize
)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(
det_id,
image,
boxes.tensor[det_id],
labels[det_id],
scores[det_id],
masks[det_id],
meshes[det_id],
)
return predictions
def visualize_prediction(
self, det_id, image, box, label, score, mask, mesh, alpha=0.6, dpi=200
):
mask_color = np.array(self.colors[label], dtype=np.float32)
cat_name = self.cat_names[label]
thickness = max([int(np.ceil(0.001 * image.shape[0])), 1])
box_color = (0, 255, 0) # '#00ff00', green
text_color = (218, 227, 218) # gray
composite = image.copy().astype(np.float32)
# overlay mask
idx = mask.nonzero()
composite[idx[:, 0], idx[:, 1], :] *= 1.0 - alpha
composite[idx[:, 0], idx[:, 1], :] += alpha * mask_color
# overlay box
(x0, y0, x1, y1) = (int(x + 0.5) for x in box)
composite = cv2.rectangle(
composite, (x0, y0), (x1, y1), color=box_color, thickness=thickness
)
composite = composite.astype(np.uint8)
# overlay text
font_scale = 0.001 * image.shape[0]
font_thickness = thickness
font = cv2.FONT_HERSHEY_TRIPLEX
text = "%s %.3f" % (cat_name, score)
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, font_thickness)
# Place text background.
if x0 + text_w > composite.shape[1]:
x0 = composite.shape[1] - text_w
if y0 - int(1.2 * text_h) < 0:
y0 = int(1.2 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(composite, back_topleft, back_bottomright, box_color, -1)
# Show text
text_bottomleft = x0, y0 - int(0.2 * text_h)
cv2.putText(
composite,
text,
text_bottomleft,
font,
font_scale,
text_color,
thickness=font_thickness,
lineType=cv2.LINE_AA,
)
save_file = os.path.join(self.output_dir, "%d_mask_%s_%.3f.png" % (det_id, cat_name, score))
cv2.imwrite(save_file, composite[:, :, ::-1])
save_file = os.path.join(self.output_dir, "%d_mesh_%s_%.3f.obj" % (det_id, cat_name, score))
verts, faces = mesh.get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
def setup_cfg(args):
cfg = get_cfg()
get_meshrcnn_cfg_defaults(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger(name="demo")
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
im_name = args.input.split("/")[-1].split(".")[0]
demo = VisualizationDemo(
cfg, vis_highest_scoring=args.onlyhighest, output_dir=os.path.join(args.output, im_name)
)
# use PIL, to be consistent with evaluation
img = read_image(args.input, format="BGR")
predictions = demo.run_on_image(img, focal_length=args.focal_length)
logger.info("Predictions saved in %s" % (os.path.join(args.output, im_name)))
|
def __init__(self, cfg, vis_highest_scoring=True, output_dir="./vis"):
"""
Args:
cfg (CfgNode):
vis_highest_scoring (bool): If set to True visualizes only
the highest scoring prediction
"""
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device("cpu")
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir
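# Minimal usage sketch (mirrors the __main__ block in the full file; cfg comes
# from setup_cfg and the image path is a placeholder):
# demo = VisualizationDemo(cfg, vis_highest_scoring=True, output_dir="./vis/example")
# predictions = demo.run_on_image(read_image("input.jpg", format="BGR"), focal_length=20.0)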
| 60 | 76 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.logger import setup_logger
from pytorch3d.io import save_obj
from pytorch3d.structures import Meshes
# required so that .register() calls are executed in module scope
import meshrcnn.data # noqa
import meshrcnn.modeling # noqa
import meshrcnn.utils # noqa
from meshrcnn.config import get_meshrcnn_cfg_defaults
from meshrcnn.evaluation import transform_meshes_to_camera_coord_system
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/pix3d/meshrcnn_R50_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input image")
parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
args = get_parser().parse_args()
from meshrcnn.data.datasets.register_pix3d import register_pix3d
register_pix3d(args.opts[1])
import cv2
logger = logging.getLogger("demo")
class VisualizationDemo(object):
def __init__(self, cfg, vis_highest_scoring=True, output_dir="./vis"):
"""
Args:
cfg (CfgNode):
vis_highest_scoring (bool): If set to True visualizes only
the highest scoring prediction
"""
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device("cpu")
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir
def run_on_image(self, image, focal_length=10.0):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
focal_length (float): the focal_length of the image
Returns:
predictions (dict): the output of the model.
"""
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
# camera matrix
imsize = [image.shape[0], image.shape[1]]
# focal <- focal * image_width / 32
focal_length = image.shape[1] / 32 * focal_length
K = [focal_length, image.shape[1] / 2, image.shape[0] / 2]
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(
verts=[mesh[0] for mesh in instances.pred_meshes],
faces=[mesh[1] for mesh in instances.pred_meshes],
)
pred_dz = instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1])
tc = pred_dz.abs().max() + 1.0
zranges = torch.stack(
[
torch.stack(
[
tc - tc * pred_dz[i] / 2.0 / focal_length,
tc + tc * pred_dz[i] / 2.0 / focal_length,
]
)
for i in range(len(meshes))
],
dim=0,
)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(
meshes, boxes.tensor, zranges, Ks, imsize
)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(
det_id,
image,
boxes.tensor[det_id],
labels[det_id],
scores[det_id],
masks[det_id],
meshes[det_id],
)
return predictions
def visualize_prediction(
self, det_id, image, box, label, score, mask, mesh, alpha=0.6, dpi=200
):
mask_color = np.array(self.colors[label], dtype=np.float32)
cat_name = self.cat_names[label]
thickness = max([int(np.ceil(0.001 * image.shape[0])), 1])
box_color = (0, 255, 0) # '#00ff00', green
text_color = (218, 227, 218) # gray
composite = image.copy().astype(np.float32)
# overlay mask
idx = mask.nonzero()
composite[idx[:, 0], idx[:, 1], :] *= 1.0 - alpha
composite[idx[:, 0], idx[:, 1], :] += alpha * mask_color
# overlay box
(x0, y0, x1, y1) = (int(x + 0.5) for x in box)
composite = cv2.rectangle(
composite, (x0, y0), (x1, y1), color=box_color, thickness=thickness
)
composite = composite.astype(np.uint8)
# overlay text
font_scale = 0.001 * image.shape[0]
font_thickness = thickness
font = cv2.FONT_HERSHEY_TRIPLEX
text = "%s %.3f" % (cat_name, score)
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, font_thickness)
# Place text background.
if x0 + text_w > composite.shape[1]:
x0 = composite.shape[1] - text_w
if y0 - int(1.2 * text_h) < 0:
y0 = int(1.2 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(composite, back_topleft, back_bottomright, box_color, -1)
# Show text
text_bottomleft = x0, y0 - int(0.2 * text_h)
cv2.putText(
composite,
text,
text_bottomleft,
font,
font_scale,
text_color,
thickness=font_thickness,
lineType=cv2.LINE_AA,
)
save_file = os.path.join(self.output_dir, "%d_mask_%s_%.3f.png" % (det_id, cat_name, score))
cv2.imwrite(save_file, composite[:, :, ::-1])
save_file = os.path.join(self.output_dir, "%d_mesh_%s_%.3f.obj" % (det_id, cat_name, score))
verts, faces = mesh.get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
def setup_cfg(args):
cfg = get_cfg()
get_meshrcnn_cfg_defaults(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger(name="demo")
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
im_name = args.input.split("/")[-1].split(".")[0]
demo = VisualizationDemo(
cfg, vis_highest_scoring=args.onlyhighest, output_dir=os.path.join(args.output, im_name)
)
# use PIL, to be consistent with evaluation
img = read_image(args.input, format="BGR")
predictions = demo.run_on_image(img, focal_length=args.focal_length)
logger.info("Predictions saved in %s" % (os.path.join(args.output, im_name)))
|
_get_field_uniq_x_coef
|
This function outputs a threshold on the number of occurrences of the different variants of a list of columns (fields).
In short, if coef is for ex. 0.9, then the function outputs the number of occurrences that keeps all but the
10% least used variants.
If coef is more than 1.0, then 'coef' itself is used as the threshold.
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_tabular.pd.ipynb (unless otherwise specified).
__all__ = ['PartDep']
# Cell
from fastai.tabular.all import *
from .core import *
# Cell
from plotnine import *
# Cell
from IPython.display import clear_output
# Cell
class PartDep(Interpret):
"""
Calculate Partial Dependence. Continuous vars are divided into buckets and are analyzed as well.
Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.
For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
(their values are substituted as a pair, not as separate values)
coef is useful when we don't want to deal with all the variants, but only with the most common.
In short, if coef is for ex. 0.9, then the function outputs the number of occurrences that keeps all but the
10% least used variants.
If coef is more than 1.0, then 'coef' itself is used as the threshold (as a min number of occurrences)
use_log=True is needed if we have transformed the dependent variable into log
use_int=True is needed if we want the log-detransformed (exponented) var to be an integer, not a float
is_continue=True helps with long calculations; it continues the last calculation from the saved file
is_use_cache=True loads the last fully calculated result. Can distinguish caches that were made with different
fields and coef
no_precalc=True -- don't calculate PartDep (useful if you want to use `plot_raw` and `plot_model` only)
"""
def __init__(self, learn, df, model_name: str, fields: list = (), coef: float = 1.0,
is_sorted: bool = True, use_log=False, use_int=False,
cache_path=None, is_use_cache=True, is_continue=False, no_precalc=False):
super().__init__(learn, df)
self.use_log = use_log
self.use_int = use_int
self.coef = coef
self.is_sorted = is_sorted
if (fields is None) or (len(fields) == 0):
self.fields = self._get_all_columns()
else:
self.fields = listify(fields)
self.part_dep_df = None
self.cache_path = ifnone(cache_path, learn.path / 'cache')
self.save_name = f"{model_name}_part_dep"
self.is_use_cache = is_use_cache
self.is_continue = is_continue
self.dep_var = self._get_dep_var()
self.is_biclassification = True if (learn.dls.c == 2) else False
if (no_precalc==False):
self._load_or_calculate()
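# Usage sketch (illustrative; the learner, dataframe and field names are placeholders):
#   part_dep = PartDep(learn, df, model_name='my_model', fields=[['Store', 'StoreType']], coef=0.9)
#   part_dep.part_dep_df.head()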
@classmethod
def what_cached(self, model_name: str, path=None, learn=None):
"""
Shows what keys are cached
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
if not (Path(f"{path / name}.pkl").exists()):
print(f"No chache file")
else:
f = open(path / f"{name}.pkl", "rb")
var = load(f)
f.close()
for k in var.keys():
print(k)
@classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
"""
deletes the cache file
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
files = (Path(f"{path / name}.pkl"), Path(path / 'pd_interm.pkl'))
for file in files:
if not (file.exists()):
print(f"No chache file {file}")
else:
file.unlink()
def _cont_into_buckets(self, df_init, CONT_COLS):
"""
Categorical values can be easily distinguished one from another,
but that doesn't work with continuous values; we have to divide their
values into buckets and then use all values in a bucket as a single value
that averages the bucket. This way we convert a cont feature into a pseudo categorical one
and are able to apply partial dependence analysis to it
"""
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for x, y in zip(edges[::], edges[1::]):
df.loc[(df[col] > x) & (df[col] < y), col] = (x + y) / 2
return df
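# Illustrative example: for a continuous column whose auto-binned edges are
# [0, 10, 20, ...], every value that falls strictly between 10 and 20 is
# replaced by 15, so the column behaves like a categorical feature downstream.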
# MASKED: _get_field_uniq_x_coef function (lines 123-139)
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
'''
Function calculates partial dependency for the columns in fields.
Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.
For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
(their values are substituted as a pair, not as separate values)
coef is useful when we don't want to deal with all the variants, but only with the most common
'''
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
coef, is_sorted, use_log, use_int = self.coef, self.is_sorted, self.use_log, self.use_int
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
# here we prepare data to eliminate pairs that occur too little
# and make NaN a separate value to appear in the occurrences
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST) # to treat None as a separate field
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
occs = occs[occs['Times'] >= field_min_occ]
df_copy = df.merge(occs[fields]).copy()
# here for every pair of values of fields we substitute its values in the original df
# with the current one and calculate predictions
# So we predict the mean dep_var for every pair of values of fields on the whole dataset
frame = []
ln = len(occs)
if (ln > 0):
for _, row in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
# We don't need to do df_copy = df.merge(occs[field]).copy() every time
# as every time we change the same column (set of columns)
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds)
preds = int(preds) if (use_int == True) else preds
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
# Here for every pair of fields we calculate the mean dep_var deviation
# This deviation is the score that shows how and where this particular pair of fields
# moves the dependent variable
# Added times to more easily understand the data (the more times, the more sure we are)
out = pd.DataFrame(frame, columns=fields + [dep_name, 'times'])
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out
def _get_part_dep(self):
'''
Makes a dataframe with partial dependencies for every pair of columns in fields
'''
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
# Load from cache
if (is_continue == True):
if Path(cache_path / 'pd_interm.pkl').exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
# Calculate
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): "value"})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path(cache_path / 'pd_interm.pkl').exists():
Path(cache_path / 'pd_interm.pkl').unlink() # delete intermediate file
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result
def _load_dict(self, name, path):
if not (Path(f"{path / name}.pkl").exists()):
return None
return self._ld_var(name=name, path=path)
def _save_cached(self):
"""
Saves the calculated PartDep df into path.
More than one result can be saved, stored as a dict with fields as the key
"""
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path)
def _load_cached(self):
"""
Loads the calculated PartDep df if its hash exists.
"""
name = self.save_name
path = self.cache_path
if not (Path(f"{path / name}.pkl").exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if (key not in ld_dict):
return None
return ld_dict[key]
def _load_or_calculate(self):
"""
Calculates partial dependence or loads it from the cache if possible
"""
if (self.is_use_cache == False) or isNone(self._load_cached()):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached()
def _general2partial(self, df):
if (len(df) == 0):
return None
copy_df = df.copy()
feature = copy_df['feature'].iloc[0]
copy_df.drop(columns='feature', inplace=True)
copy_df.rename(columns={"value": feature}, inplace=True)
return copy_df
def plot_raw(self, field, sample=1.0):
"""
Plots the dependency graph from the data itself
field must be a list of exactly one feature
sample is a coefficient applied to len(df). Lower it if the kernel tends to shut down on that
"""
df = self.df
df = df.sample(int(len(df)*sample))
field = field[0]
dep_var = f"{self._get_dep_var()}_orig" if (self.use_log == True) else self._get_dep_var()
return ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def plot_model(self, field, strict_recalc=False, sample=1.0):
'''
Plots the dependency graph from the model.
It also takes times into account, so the plot becomes much more resilient, because not every value is treated as equal
(more occurrences means more weight)
field must be a list of exactly one feature
strict_recalc=True ignores the precalculated `part_dep_df` and recalculates it anyway
sample is a coefficient applied to len(df). Lower it if the kernel tends to shut down on that
'''
cached = self.get_pd(feature=self._list_to_key(field))
if (strict_recalc == False) and isNotNone(cached):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f"{self._get_dep_var()}"
rearr = []
for var, fee, times in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int(len(rearr)*sample))
return ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def get_pd(self, feature, min_tm=1):
"""
Gets a particular feature's subtable from the whole one (min times is an optional parameter)
"""
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f"""(feature == "{feature}") and (times > {min_tm})""")
return self._general2partial(df=df)
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
"""
Transforms the whole features table into the get_part_dep_one output table format
"""
def get_xth_el(str_list: str, indexes: list):
lst = str_list if is_listy(str_list) else ast.literal_eval(str_list)
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}')
def plot_part_dep(self, fields, limit=20, asc=False):
"""
Plots the partial dependency plot for a sublist of connected `fields`
`fields` must be a sublist of the `fields` given at initialization
"""
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = heat_max - heat_min
colors = [((times - heat_min) / (dif), (times - heat_min) / (4 * dif), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=not (asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x="value", y=dep_var, sort_columns=True, figsize=(10, 10),
color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if (self.is_biclassification):
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0,0), (0, -30),
xycoords='axes fraction', textcoords='offset points',
va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), p.get_y() * 1.005))
ax.annotate(f'{int(t)}', ((p.get_width() * .45), p.get_y() + 0.1), color='white', weight='bold')
|
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
'''
This function outputs a threshold on the number of occurrences for the different variants of a list of columns (fields)
In short, if coef is for ex. 0.9, then the function outputs the number of occurrences that keeps all but the 10%
least used variants
If coef is more than 1.0, then 'coef' itself is used as the threshold
'''
if (coef > 1):
return math.ceil(coef)
coef = 0. if (coef < 0) else coef
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
num = math.ceil(coef * len(occs))
if (num <= 0):
# the number of occurrences is now max_occs+1 (so no items will pass this filter)
return occs.iloc[0]['Times'] + 1
else:
return occs.iloc[num - 1]['Times']
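# --- Illustrative sketch (not part of the original source): a toy example of the
# --- thresholding logic above. The column name 'Store', the toy values and the
# --- helper name are assumptions made only for demonstration.
import math
import pandas as pd
def _example_uniq_x_coef(coef=0.9):
    toy = pd.DataFrame({'Store': [1, 1, 1, 2, 2, 3]})
    occs = toy.groupby(['Store']).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
    num = math.ceil(coef * len(occs))        # keep all but the least used ~10% of variants
    return occs.iloc[num - 1]['Times']       # -> 1 here, so every variant passes the filter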
| 123 | 139 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_tabular.pd.ipynb (unless otherwise specified).
__all__ = ['PartDep']
# Cell
from fastai.tabular.all import *
from .core import *
# Cell
from plotnine import *
# Cell
from IPython.display import clear_output
# Cell
class PartDep(Interpret):
"""
Calculate Partial Dependence. Continuous vars are divided into buckets and are analyzed as well
Fields is a list of lists of the columns we want to test. The inner items are treated as connected fields.
For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
(their values are substituted as a pair, not as separate values)
coef is useful when we don't want to deal with all the variants, but only with the most common ones
In short, if coef is for ex. 0.9, then the function outputs the number of occurrences that keeps all but the 10%
least used variants
If coef is more than 1.0, then 'coef' itself is used as the threshold (as the min number of occurrences)
use_log=True is needed if we have transformed the dependent variable into its log
use_int=True is needed if we want the log-detransformed (exponentiated) var to be an integer, not a float
is_continue=True helps with long calculations; it continues the last calculation from the saved file
is_use_cache=True loads the last fully calculated result. It can distinguish caches that were made with different
fields and coef
no_precalc=True -- don't calculate PartDep (useful if you want to use `plot_raw` and `plot_model` only)
"""
def __init__(self, learn, df, model_name: str, fields: list = (), coef: float = 1.0,
is_sorted: bool = True, use_log=False, use_int=False,
cache_path=None, is_use_cache=True, is_continue=False, no_precalc=False):
super().__init__(learn, df)
self.use_log = use_log
self.use_int = use_int
self.coef = coef
self.is_sorted = is_sorted
if (fields is None) or (len(fields) == 0):
self.fields = self._get_all_columns()
else:
self.fields = listify(fields)
self.part_dep_df = None
self.cache_path = ifnone(cache_path, learn.path / 'cache')
self.save_name = f"{model_name}_part_dep"
self.is_use_cache = is_use_cache
self.is_continue = is_continue
self.dep_var = self._get_dep_var()
self.is_biclassification = True if (learn.dls.c == 2) else False
if (no_precalc==False):
self._load_or_calculate()
@classmethod
def what_cached(self, model_name: str, path=None, learn=None):
"""
Shows what keys are cached
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
if not (Path(f"{path / name}.pkl").exists()):
print(f"No chache file")
else:
f = open(path / f"{name}.pkl", "rb")
var = load(f)
f.close()
for k in var.keys():
print(k)
@classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
"""
deletes the cache file
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
files = (Path(f"{path / name}.pkl"), Path(path / 'pd_interm.pkl'))
for file in files:
if not (file.exists()):
print(f"No chache file {file}")
else:
file.unlink()
def _cont_into_buckets(self, df_init, CONT_COLS):
"""
Categorical values can be easily distinguished one from another,
but that doesn't work with continuous values. We have to divide their
values into buckets and then use all values in a bucket as a single value
that averages the bucket. This way we convert a continuous feature into a pseudo-categorical one
and are able to apply partial dependence analysis to it
"""
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for x, y in zip(edges[::], edges[1::]):
df.loc[(df[col] > x) & (df[col] < y), col] = (x + y) / 2
return df
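# Illustrative note (added, not from the original source): with a toy column
# col = [1.0, 1.2, 3.7, 4.1, 9.9], np.histogram_bin_edges(col, bins='auto')
# might return edges like [1.0, 3.2, 5.4, 7.6, 9.9]; every value strictly
# inside a pair of neighbouring edges is then replaced by the bucket mid-point,
# e.g. 3.7 and 4.1 both become (3.2 + 5.4) / 2 = 4.3, which turns the
# continuous column into a small set of pseudo-categorical levels.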
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
'''
This function outputs a threshold on the number of occurrences for the different variants of a list of columns (fields)
In short, if coef is for ex. 0.9, then the function outputs the number of occurrences that keeps all but the 10%
least used variants
If coef is more than 1.0, then 'coef' itself is used as the threshold
'''
if (coef > 1):
return math.ceil(coef)
coef = 0. if (coef < 0) else coef
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
num = math.ceil(coef * len(occs))
if (num <= 0):
# the number of occurrences is now max_occs+1 (so no items will pass this filter)
return occs.iloc[0]['Times'] + 1
else:
return occs.iloc[num - 1]['Times']
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
'''
Calculates partial dependency for the columns in fields.
Fields is a list of lists of the columns we want to test. The inner items are treated as connected fields.
For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
(their values are substituted as a pair, not as separate values)
coef is useful when we don't want to deal with all the variants, but only with the most common ones
'''
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
coef, is_sorted, use_log, use_int = self.coef, self.is_sorted, self.use_log, self.use_int
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
# here we prepare the data to eliminate pairs that occur too rarely
# and make NaN a separate value so it appears in the occurrence counts
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST) # to treat None as a separate field
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
occs = occs[occs['Times'] >= field_min_occ]
df_copy = df.merge(occs[fields]).copy()
# here for every pair of values of fields we substitute its values in the original df
# with the current one and calculate predictions
# So we predict the mean dep_var for every pair of values of fields on the whole dataset
frame = []
ln = len(occs)
if (ln > 0):
for _, row in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
# We don't need to do df_copy = df.merge(occs[field]).copy() every time
# as every time we change the same column (set of columns)
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds)
preds = int(preds) if (use_int == True) else preds
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
# Here for every pair of fields we calculate the mean dep_var deviation
# This deviation is the score that shows how and where this particular pair of fields
# moves the dependent variable
# Times is added to make the data easier to interpret (the more times, the more sure we are)
out = pd.DataFrame(frame, columns=fields + [dep_name, 'times'])
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out
def _get_part_dep(self):
'''
Makes a dataframe with partial dependencies for every pair of columns in fields
'''
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
# Load from cache
if (is_continue == True):
if Path(cache_path / 'pd_interm.pkl').exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
# Calculate
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): "value"})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path(cache_path / 'pd_interm.pkl').exists():
Path(cache_path / 'pd_interm.pkl').unlink() # delete intermediate file
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result
def _load_dict(self, name, path):
if not (Path(f"{path / name}.pkl").exists()):
return None
return self._ld_var(name=name, path=path)
def _save_cached(self):
"""
Saves the calculated PartDep df into path.
More than one result can be saved, stored as a dict with fields as the key
"""
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path)
def _load_cached(self):
"""
Loads the calculated PartDep df if its hash exists.
"""
name = self.save_name
path = self.cache_path
if not (Path(f"{path / name}.pkl").exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if (key not in ld_dict):
return None
return ld_dict[key]
def _load_or_calculate(self):
"""
Calculates partial dependence or loads it from the cache if possible
"""
if (self.is_use_cache == False) or isNone(self._load_cached()):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached()
def _general2partial(self, df):
if (len(df) == 0):
return None
copy_df = df.copy()
feature = copy_df['feature'].iloc[0]
copy_df.drop(columns='feature', inplace=True)
copy_df.rename(columns={"value": feature}, inplace=True)
return copy_df
def plot_raw(self, field, sample=1.0):
"""
Plots the dependency graph from the data itself
field must be a list of exactly one feature
sample is a coefficient applied to len(df). Lower it if the kernel tends to shut down on that
"""
df = self.df
df = df.sample(int(len(df)*sample))
field = field[0]
dep_var = f"{self._get_dep_var()}_orig" if (self.use_log == True) else self._get_dep_var()
return ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def plot_model(self, field, strict_recalc=False, sample=1.0):
'''
Plots the dependency graph from the model.
It also takes times into account, so the plot becomes much more resilient, because not every value is treated as equal
(more occurrences means more weight)
field must be a list of exactly one feature
strict_recalc=True ignores the precalculated `part_dep_df` and recalculates it anyway
sample is a coefficient applied to len(df). Lower it if the kernel tends to shut down on that
'''
cached = self.get_pd(feature=self._list_to_key(field))
if (strict_recalc == False) and isNotNone(cached):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f"{self._get_dep_var()}"
rearr = []
for var, fee, times in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int(len(rearr)*sample))
return ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def get_pd(self, feature, min_tm=1):
"""
Gets a particular feature's subtable from the whole one (min times is an optional parameter)
"""
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f"""(feature == "{feature}") and (times > {min_tm})""")
return self._general2partial(df=df)
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
"""
Transforms the whole features table into the get_part_dep_one output table format
"""
def get_xth_el(str_list: str, indexes: list):
lst = str_list if is_listy(str_list) else ast.literal_eval(str_list)
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}')
def plot_part_dep(self, fields, limit=20, asc=False):
"""
Plots the partial dependency plot for a sublist of connected `fields`
`fields` must be a sublist of the `fields` given at initialization
"""
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = heat_max - heat_min
colors = [((times - heat_min) / (dif), (times - heat_min) / (4 * dif), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=not (asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x="value", y=dep_var, sort_columns=True, figsize=(10, 10),
color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if (self.is_biclassification):
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0,0), (0, -30),
xycoords='axes fraction', textcoords='offset points',
va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), p.get_y() * 1.005))
ax.annotate(f'{int(t)}', ((p.get_width() * .45), p.get_y() + 0.1), color='white', weight='bold')
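# --- A hedged usage sketch appended for illustration (not part of the
# --- autogenerated module). `my_learn` and `my_df` stand for a trained fastai
# --- TabularLearner and the DataFrame to analyze; the model name and the field
# --- names below are placeholders.
def _example_part_dep_usage(my_learn, my_df):
    pdep = PartDep(my_learn, my_df, model_name='tabular_v1',
                   fields=[['Store', 'StoreType'], 'Promo'],
                   coef=0.9, use_log=False, is_use_cache=True)
    pdep.plot_part_dep(fields=['Store', 'StoreType'], limit=20)
    return pdep.get_pd(feature=pdep._list_to_key(['Store', 'StoreType']))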
|
sigmoid_focal_loss
|
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which the focal loss is
measured between the sigmoid value and the target label.
The focal loss is given as followed:
.. math::
\mathop{loss_{i,\,j}}\limits_{i\in\mathbb{[0,\,N-1]},\,j\in\mathbb{[0,\,C-1]}}=\left\{
\begin{array}{rcl}
- \frac{1}{fg\_num} * \alpha * {(1 - \sigma(x_{i,\,j}))}^{\gamma} * \log(\sigma(x_{i,\,j})) & & {(j +1) = label_{i,\,0}} \\
- \frac{1}{fg\_num} * (1 - \alpha) * {\sigma(x_{i,\,j})}^{ \gamma} * \log(1 - \sigma(x_{i,\,j})) & & {(j +1)!= label_{i,\,0}}
\end{array} \right.
We know that
.. math::
\sigma(x_j) = \frac{1}{1 + \exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
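The formula above can also be checked with a small NumPy sketch (added for
illustration only; the helper name and the toy handling of :attr:`fg_num`,
:attr:`gamma` and :attr:`alpha` as plain scalars are assumptions, not part of the API):
.. code-block:: python
    import numpy as np
    def sigmoid_focal_loss_np(x, label, fg_num, gamma=2.0, alpha=0.25):
        # x: [N, C] logits, label: [N, 1] with values in [0, C] (0 = background)
        p = 1.0 / (1.0 + np.exp(-x))                 # sigma(x)
        C = x.shape[1]
        pos = (label == np.arange(1, C + 1))         # True where (j + 1) == label[i, 0]
        pos_loss = -alpha * (1.0 - p) ** gamma * np.log(p)
        neg_loss = -(1.0 - alpha) * p ** gamma * np.log(1.0 - p)
        return np.where(pos, pos_loss, neg_loss) / fg_num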
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as follows:
1. An anchor is assigned to the ground-truth box with which it has the highest
IoU overlap.
2. An anchor is assigned to a ground-truth box when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. An anchor is assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is a fake positive; if a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
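# A small NumPy sketch (added for illustration; not the C++ operator's actual
# implementation) of the anchor labelling rules described in the docstring
# above. `iou` is an assumed [M, G] matrix of IoU overlaps between M anchors
# and G ground-truth boxes; the helper name is a placeholder.
def _illustrate_anchor_assignment(iou, positive_overlap=0.5, negative_overlap=0.4):
    labels = np.full(iou.shape[0], -1, dtype=np.int64)   # -1: not used in training
    max_iou = iou.max(axis=1)
    labels[max_iou < negative_overlap] = 0               # rule 3: background
    labels[max_iou >= positive_overlap] = 1              # rule 2: positive by threshold
    labels[iou.argmax(axis=0)] = 1                       # rule 1: best anchor per gt box
    return labels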
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and
ground-truth boxes, this layer assigns classification and regression
targets to each anchor; these target labels are used to train the RPN.
The classification target is a binary class label (of being an object or
not). Following the Faster-RCNN paper, positive labels are assigned to two
kinds of anchors: (i) the anchor/anchors with the highest IoU overlap with
a ground-truth box, or (ii) an anchor that has an IoU overlap higher than
rpn_positive_overlap (0.7) with any ground-truth box. Note that a single
ground-truth box may assign positive labels to multiple anchors. An anchor
is labeled negative when its IoU ratio is lower than rpn_negative_overlap
(0.3) for all ground-truth boxes. Anchors that are neither positive nor
negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence scores. N is the batch size, 1 is the
foreground/background sigmoid channel, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
anchors; F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
# MASKED: sigmoid_focal_loss function (lines 472-616)
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection outputs is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
as Out. If the index is used to gather other attributes such as age,
one needs to first reshape the input (N, M, 1) to (N * M, 1), where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
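# A minimal single-class NMS sketch in NumPy (added for illustration; the real
# op runs a multi-class NMS in C++). `boxes` is an assumed [K, 4] array of
# [xmin, ymin, xmax, ymax] and `scores` an assumed [K] array of confidences;
# the helper name is a placeholder.
def _illustrate_nms(boxes, scores, nms_threshold=0.3):
    order = scores.argsort()[::-1]   # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
        iou = inter / (area_i + areas - inter)
        order = order[1:][iou <= nms_threshold]   # drop boxes overlapping the kept one
    return keep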
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
data type is float32 or float64. The second is list consist of
4 elements shared by all boxes and data type is float32 or float64.
Other is None and not involved in calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x, y are the center coordinates of the boxes, w, h are their
width and height, and x, y, w, h should be divided by the
input image height to scale to [0, 1] (see the conversion
sketch after this argument list).
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
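A minimal pre-processing sketch (illustration only; `corner_boxes`, `img_w`
and `img_h` are hypothetical names, not part of this API) that converts
pixel-space corner boxes into the normalized [x, y, w, h] layout expected by
`gt_box`; for the square inputs typically used with YOLOv3 (img_w == img_h)
this matches the "divided by the input image height" note above:
.. code-block:: python
    import numpy as np

    def to_normalized_center_size(corner_boxes, img_w, img_h):
        # corner_boxes: [B, 4] in pixels, as [xmin, ymin, xmax, ymax]
        x_c = (corner_boxes[:, 0] + corner_boxes[:, 2]) / 2. / img_w
        y_c = (corner_boxes[:, 1] + corner_boxes[:, 3]) / 2. / img_h
        w = (corner_boxes[:, 2] - corner_boxes[:, 0]) / img_w
        h = (corner_boxes[:, 3] - corner_boxes[:, 1]) / img_h
        return np.stack([x_c, y_c, w, h], axis=1)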
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
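A minimal NumPy sketch of the greedy matching idea described above (for
intuition only; this is not the operator kernel and it ignores LoD batching):
.. code-block:: python
    import numpy as np

    def greedy_bipartite_match(dist):
        # dist: [K, M] pair-wise distance matrix, larger means better match
        dist = np.array(dist, dtype=np.float64)
        K, M = dist.shape
        col_to_row = np.full(M, -1, dtype=np.int32)
        for _ in range(min(K, M)):
            k, m = np.unravel_index(np.argmax(dist), dist.shape)
            col_to_row[m] = k          # column m is matched to row k
            dist[k, :] = -np.inf       # each row is matched at most once
            dist[:, m] = -np.inf       # each column is matched at most once
        return col_to_row              # columns left unmatched keep -1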
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
This operator, given the target bounding boxes or labels, assigns
classification and regression targets to each prediction, as well as
weights for each prediction. The weights are used to specify which predictions
do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
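A rough NumPy sketch of the two assignment steps above (illustration only;
LoD offsets and data types are simplified, and the helper name is hypothetical):
.. code-block:: python
    import numpy as np

    def assign_targets(x, match_indices, neg_indices=None, mismatch_value=0.):
        # x: [M, P, K] targets (LoD ignored here); match_indices: [N, P]
        M, P, K = x.shape
        N = match_indices.shape[0]
        out = np.full((N, P, K), mismatch_value, dtype=x.dtype)
        out_weight = np.zeros((N, P, 1), dtype=np.float32)
        for i in range(N):
            for j in range(P):
                idx = match_indices[i, j]
                if idx != -1:                   # matched: copy the target row
                    out[i, j] = x[idx, j % P]   # lod[i] offset omitted here
                    out_weight[i, j] = 1.
            if neg_indices is not None:         # negative examples keep weight 1
                for idx in neg_indices[i]:
                    out[i, idx] = mismatch_value
                    out_weight[i, idx] = 1.
        return out, out_weight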
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of the column is not matched to any entity of the row in the
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and same data type
with `input`, N and P is the same as they are in `matched_indices`,
K is the same as it in input of X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float32',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
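A minimal sketch of step 5 (illustration only; `conf_loss`, `loc_loss` and
`target_loc_weight` stand for the per-prediction losses and positive-box
weights computed in the earlier steps, here as NumPy arrays of shape [N, Np]):
.. code-block:: python
    def combine_ssd_loss(conf_loss, loc_loss, target_loc_weight,
                         conf_loss_weight=1.0, loc_loss_weight=1.0,
                         normalize=True):
        loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
        loss = loss.sum(axis=1, keepdims=True)     # [N, 1], one value per image
        if normalize:
            loss = loss / target_loc_weight.sum()  # normalize by matched boxes
        return loss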
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', now only support mining \
type of `max_negative`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
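# Reshape gt_label into a 3-D input for target_assign: the 0 entries in the
# target shape copy the existing leading dimensions, and the trailing (-1, 1)
# turns the labels into a [..., 1] column.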
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each box is
in the (min_size, max_size) interval, and the boxes are generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across the
height or width of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
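For example, the settings used in the code example below give (worked
illustration only):
.. code-block:: python
    densities = [4, 2, 1]
    fixed_ratios = [1.]
    num_priors = sum(len(fixed_ratios) * (d ** 2) for d in densities)
    # 1 * (16 + 4 + 1) = 21 density prior boxes per input position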
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step across the
height or width of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_size` and `max_size` are None, the `min_size` and `max_size`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows (a worked example follows this argument list):
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should be equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should be equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
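Plugging the settings of Example 1 below (``base_size=300``, ``min_ratio=20``,
``max_ratio=90``, six inputs) into the ``base_size`` formula above gives the
following sizes (worked sketch only, not part of the API):
.. code-block:: python
    import math
    import six

    base_size, min_ratio, max_ratio, num_layer = 300, 20, 90, 6
    min_sizes, max_sizes = [], []
    step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))  # 17
    for ratio in six.moves.range(min_ratio, max_ratio + 1, step):      # 20, 37, 54, 71, 88
        min_sizes.append(base_size * ratio / 100.)
        max_sizes.append(base_size * (ratio + step) / 100.)
    min_sizes = [base_size * .10] + min_sizes  # [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
    max_sizes = [base_size * .20] + max_sizes  # [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]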
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The order of the generated anchors
is firstly the aspect_ratios loop, then the anchor_sizes loop.
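For example (a sketch of the anchor count and ordering described above;
illustration only):
.. code-block:: python
    anchor_sizes = [64., 128., 256., 512.]
    aspect_ratios = [0.5, 1.0, 2.0]
    num_anchors = len(anchor_sizes) * len(aspect_ratios)  # 12 anchors per position
    # aspect_ratios loop first, then anchor_sizes loop:
    order = [(ar, s) for ar in aspect_ratios for s in anchor_sizes]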
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each roi into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes produced by GenerateProposalOp and the groundtruth,
this operator samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposal_op; these boxes
are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called Rois),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in Rois, the classification (class label) and regression targets (box label) are assigned to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether each box contributes to the training loss.
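A simplified NumPy sketch of the sampling rule described above (illustration
only; `overlaps` is a hypothetical per-RoI max-overlap array and the details
of random sampling are schematic):
.. code-block:: python
    import numpy as np

    def sample_rois(overlaps, batch_size_per_im=256, fg_fraction=0.25,
                    fg_thresh=0.25, bg_thresh_hi=0.5, bg_thresh_lo=0.0,
                    rng=np.random):
        # overlaps: [R] max overlap of each RoI with any groundtruth box
        fg_inds = np.where(overlaps > fg_thresh)[0]
        bg_inds = np.where((overlaps < bg_thresh_hi) &
                           (overlaps > bg_thresh_lo))[0]
        fg_num = min(int(batch_size_per_im * fg_fraction), len(fg_inds))
        bg_num = min(batch_size_per_im - fg_num, len(bg_inds))
        fg_inds = rng.permutation(fg_inds)[:fg_num]  # cap the foreground count
        bg_inds = rng.permutation(bg_inds)[:bg_num]  # fill the rest with background
        return fg_inds, bg_inds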
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): If True, bbox regression is class agnostic and only represents fg and bg boxes.
        is_cascade_rcnn(bool): If True, some bboxes crossing the image boundary are filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.
    Please note the data format of the ground-truth segmentation, assuming the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
        gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
            float32 data type, its LoD level is 3.
            Usually users do not need to understand LoD, but they should return
            the correct data format in the reader.
            LoD[0] represents the number of ground-truth objects of
            each instance. LoD[1] represents the segmentation count of each
            object. LoD[2] represents the number of polygons of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
            R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
    This operation proposes RoIs according to each box's probability of being
    a foreground object; the boxes are computed from the anchors. The
    bbox_deltas and the objectness scores
    are outputs of the RPN. The final proposals
    can be used to train the detection network.
    To generate proposals, this operation performs the following steps
    (a rough sketch of steps 3 and 4 follows the list):
    1. Transposes and reshapes the scores and bbox_deltas to the sizes
       (H*W*A, 1) and (H*W*A, 4).
    2. Calculates box locations as proposal candidates.
    3. Clips the boxes to the image.
    4. Removes predicted boxes with a small area.
    5. Applies NMS to get the final proposals as output.
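    The following NumPy sketch illustrates steps 3 and 4 only. It is an
    illustration, not the OP itself: box decoding and NMS are omitted, and
    ``proposals``, ``im_h`` and ``im_w`` are hypothetical values. Whether a +1
    offset is used when measuring box width and height is an implementation
    detail of the op.

    .. code-block:: python

        import numpy as np

        # Hypothetical decoded proposals in (xmin, ymin, xmax, ymax) format.
        proposals = np.array([[10., 10., 50., 60.],
                              [-5., 3., 20., 18.],
                              [30., 30., 30.05, 30.05]])
        im_h, im_w, min_size = 100., 100., 0.1
        # Step 3: clip boxes to the image boundary.
        proposals[:, 0::2] = np.clip(proposals[:, 0::2], 0, im_w - 1)
        proposals[:, 1::2] = np.clip(proposals[:, 1::2], 0, im_h - 1)
        # Step 4: remove boxes whose width or height is below min_size.
        ws = proposals[:, 2] - proposals[:, 0]
        hs = proposals[:, 3] - proposals[:, 1]
        keep = np.where((ws >= min_size) & (hs >= min_size))[0]
        proposals = proposals[keep]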
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
        anchors(Variable): A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are the height and width of the feature map,
            A is the number of anchors at each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing the
            number of RoIs of each image in the batch, where N is the number of images. For example, the tensor [4, 5]
            means the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
            'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
    Clip the boxes to the size given by im_info.
    For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
        im_w = round(width / scale)
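    The same clipping can be sketched in NumPy as follows. This is an
    illustration only; ``boxes`` is a hypothetical array of shape [M, 4].

    .. code-block:: python

        import numpy as np

        boxes = np.array([[-3., 5., 120., 40.]])  # [xmin, ymin, xmax, ymax]
        height, width, scale = 50., 100., 1.0
        im_h, im_w = round(height / scale), round(width / scale)
        boxes[:, 0::2] = np.clip(boxes[:, 0::2], 0, im_w - 1)  # xmin, xmax
        boxes[:, 1::2] = np.clip(boxes[:, 1::2], 0, im_h - 1)  # ymin, ymax
        # boxes is now [[0., 5., 99., 40.]]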
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
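    Step 1 can be sketched per FPN level roughly as follows. This is an
    illustration only; ``scores_level`` is a hypothetical [Mi, C] array of
    per-anchor class scores for one image and one level.

    .. code-block:: python

        import numpy as np

        num_classes = 10
        scores_level = np.random.rand(500, num_classes).astype('float32')
        score_threshold, nms_top_k = 0.05, 1000
        flat = scores_level.reshape(-1)
        keep = np.where(flat > score_threshold)[0]
        # Keep at most nms_top_k top-scoring predictions of this level.
        if len(keep) > nms_top_k:
            order = np.argsort(-flat[keep])
            keep = keep[order[:nms_top_k]]
        anchor_ids, class_ids = keep // num_classes, keep % num_classes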
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
    **Notice**: In some cases where the image sizes are very small, it is possible
    that there is no detection if :attr:`score_threshold` is applied at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': 1.,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
    This operator performs multi-class non maximum suppression (NMS) on
    boxes and scores.
    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes whose scores are larger than score_threshold, if this threshold is
    provided, and then keeps the nms_top_k highest-scoring boxes if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have a high IoU
    (intersection over union) overlap with already selected boxes, using adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
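    The IoU value used above (4/11) can be checked with a small NumPy snippet,
    given here for illustration only:

    .. code-block:: python

        import numpy as np

        def iou_xyxy(a, b):
            # Intersection-over-union of two (xmin, ymin, xmax, ymax) boxes.
            iw = max(0., min(a[2], b[2]) - max(a[0], b[0]))
            ih = max(0., min(a[3], b[3]) - max(a[1], b[1]))
            inter = iw * ih
            area_a = (a[2] - a[0]) * (a[3] - a[1])
            area_b = (b[2] - b[0]) * (b[3] - b[1])
            return inter / (area_a + area_b - inter)

        print(iou_xyxy([2., 3., 7., 5.], [3., 4., 8., 5.]))  # 4 / 11 = 0.3636...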
Args:
bboxes (Variable): Two types of bboxes are supported:
            1. (Tensor) A 3-D Tensor with shape
            [N, M, 4 or 8, 16, 24, 32] represents the
            predicted locations of M bounding boxes,
            N is the batch size. Each bounding box has four
            coordinate values and the layout is
            [xmin, ymin, xmax, ymax] when the box size equals 4.
            The data type is float32 or float64.
            2. (LoDTensor) A 3-D LoDTensor with shape [M, C, 4],
            where M is the number of bounding boxes and C is the
            class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes.The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive threshold in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
    **Locality Aware NMS**
    `Locality Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
    suppression (LANMS) on boxes and scores; a sketch of the merging idea follows this description.
    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes whose scores are larger than score_threshold,
    if this threshold is provided, and then keeps the nms_top_k highest-scoring boxes
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
    IoU overlap with already selected boxes, using adaptive threshold NMS based on the
    parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.
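    The score-weighted merging idea (as described in the EAST paper linked above)
    can be sketched as follows. This is only an illustration of the idea, not
    necessarily the exact merging rule used by this OP.

    .. code-block:: python

        import numpy as np

        def weighted_merge(a, b):
            # a, b: [xmin, ymin, xmax, ymax, score]; coordinates are merged
            # weighted by scores, and the scores are accumulated.
            merged = np.zeros(5)
            merged[:4] = (a[4] * a[:4] + b[4] * b[:4]) / (a[4] + b[4])
            merged[4] = a[4] + b[4]
            return merged

        print(weighted_merge(np.array([0., 0., 10., 10., 0.9]),
                             np.array([1., 1., 11., 11., 0.1])))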
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive threshold in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
    This operator performs matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme; a sketch of the decay functions
    follows this description.
    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.
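    The decay idea of Matrix NMS, as described in the SOLOv2 paper, can be
    sketched as below. This only illustrates the linear and Gaussian decay
    functions; the details of this OP may differ.

    .. code-block:: python

        import numpy as np

        def decay_factor(iou, max_iou, use_gaussian=False, sigma=2.0):
            # iou: overlap between a candidate and a higher-scoring box.
            # max_iou: the largest overlap that the higher-scoring box itself
            # has with any other higher-scoring box.
            if use_gaussian:
                return np.exp(-(iou ** 2 - max_iou ** 2) / sigma)
            return (1.0 - iou) / (1.0 - max_iou)

        # A candidate overlapping a better box gets its score decayed:
        print(0.8 * decay_factor(iou=0.6, max_iou=0.1))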
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
            selected indices, which are absolute values across batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of the proposals, we return an
    array which indicates the original index of the RoIs in the current proposals.
    The FPN level of each RoI is computed by the following formula:
.. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
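    For illustration, the level assignment can be computed in NumPy as follows.
    This is a sketch only, not the OP itself, and it assumes the base-2 logarithm
    used in the FPN paper.

    .. code-block:: python

        import numpy as np

        fpn_rois = np.array([[0., 0., 112., 112.],
                             [0., 0., 448., 448.]])
        min_level, max_level, refer_level, refer_scale = 2, 5, 4, 224
        ws = fpn_rois[:, 2] - fpn_rois[:, 0]
        hs = fpn_rois[:, 3] - fpn_rois[:, 1]
        roi_scale = np.sqrt(ws * hs)
        level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
        level = np.clip(level, min_level, max_level).astype('int32')
        # level is [3, 5]: the small RoI goes to level 3, the large one to level 5.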
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
        multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
            and data type float32 or float64. The length is
            max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input, where num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
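    Steps 2 to 4 can be sketched in NumPy roughly as follows. This is an
    illustration only and ignores the LoD/batch re-sorting of step 5; the
    input arrays are hypothetical.

    .. code-block:: python

        import numpy as np

        # Hypothetical RoIs and scores collected from two FPN levels.
        multi_rois = [np.random.rand(6, 4), np.random.rand(3, 4)]
        multi_scores = [np.random.rand(6, 1), np.random.rand(3, 1)]
        post_nms_top_n = 5
        rois = np.concatenate(multi_rois, axis=0)
        scores = np.concatenate(multi_scores, axis=0).reshape(-1)
        top_idx = np.argsort(-scores)[:post_nms_top_n]
        fpn_rois = rois[top_idx]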
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
|
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which the focal loss is
    measured between the sigmoid value and the target label.
    The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
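    For reference, the formula above can be written directly in NumPy as a
    sketch. This is an illustration only, not the implementation of this OP.

    .. code-block:: python

        import numpy as np

        def sigmoid_focal_loss_np(x, label, fg_num, gamma=2.0, alpha=0.25):
            # x: [N, C] logits; label: [N, 1] with 0 for background and
            # values in [1, C] for foreground classes.
            p = 1.0 / (1.0 + np.exp(-x))
            n, c = x.shape
            loss = np.zeros((n, c), dtype='float64')
            for i in range(n):
                for j in range(c):
                    if label[i, 0] == j + 1:
                        loss[i, j] = -alpha * (1 - p[i, j]) ** gamma * np.log(p[i, j])
                    else:
                        loss[i, j] = -(1 - alpha) * p[i, j] ** gamma * np.log(1 - p[i, j])
            return loss / fg_num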
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
                # Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
            # 0 in label represents background, >=1 in label represents foreground,
            # find the elements in label which are greater than or equal to 1, then
            # compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
| 472 | 616 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.
    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.
    3. An anchor is assigned to background when its IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd and it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size. The size
information of each image is a 3-vector containing the height and width
of the network input along with the factor by which the original image was
scaled to the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`; if not, :attr:`negative_overlap` is used as the
actual value of :attr:`positive_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each sample has 4 coordinate values. The data type
of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative samples, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
indicates whether a positive sample is a fake positive: if a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of the subsequent loss function;
to avoid a zero denominator, this OP adds 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
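# With num_classes=10 as above, score_pred / score_target have shape
# [F + B, 10] / [F + B, 1], loc_pred / loc_target have shape [F, 4], and
# fg_num has shape [1, 1], where F and B are the numbers of sampled
# positive and negative anchors.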
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
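# Flatten the per-anchor predictions across the batch so that score_index and
# loc_index (which index into all N * M anchors) can gather the class scores of
# the sampled positive + negative anchors and the locations of the positive
# anchors, respectively.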
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and
ground-truth boxes, this layer assigns classification and
regression targets to each anchor; these targets are used to
train the RPN. The classification target is a binary class label (of being
an object or not). Following the Faster-RCNN paper, positive labels
are assigned to two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap (0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. An anchor is labeled negative when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 corresponds to the
foreground/background sigmoid score, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is a crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
and the 3 values are the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple (predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. predicted_scores
and predicted_location are the predicted results of the RPN,
and target_label and target_bbox are the corresponding ground truth.
predicted_location is a 2D Tensor with shape
[F, 4], and target_bbox has the same shape as
predicted_location, where F is the number of foreground
anchors. predicted_scores is a 2D Tensor with shape
[F + B, 1], and target_label has the same shape as
predicted_scores, where B is the number of background
anchors; F and B depend on the input of this operator.
bbox_inside_weight indicates whether a predicted location is a fake foreground
and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
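# Flatten the per-anchor predictions across the batch so that score_index and
# loc_index (which index into all N * M anchors) can gather the objectness
# scores of the sampled positive + negative anchors and the locations of the
# positive anchors, respectively.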
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j + 1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j + 1) \\neq label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
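For instance, with :attr:`alpha` = 0.25, :attr:`gamma` = 2 and ignoring the division by
:math:`fg\_num`, a well-classified positive sample with :math:`\\sigma(x) = 0.9` contributes
about :math:`0.25 \\times 0.1^{2} \\times 0.105 \\approx 2.6 \\times 10^{-4}` to the loss,
while a hard positive sample with :math:`\\sigma(x) = 0.1` contributes about :math:`0.47`,
so hard examples dominate the loss.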
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch. For example, for object detection the samples are anchor boxes and :math:`N`
is the total number of positive and negative samples in a mini-batch; for image classification
the samples are images and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
1. Decode the input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non-maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection outputs is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in the first dimension are called LoD, and the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected indices, whose type is integer.
The indices are absolute values across batches. No is the same number
as in Out. If the indices are used to gather other attributes such as age,
one needs to first reshape the input from (N, M, 1) to (N * M, 1), where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
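# multiclass_nms expects the class scores laid out as [N, C, M], so apply
# softmax over the class dimension of the [N, M, C] input and then transpose.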
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}. The data type is float32 or float64.
y (Variable): ${y_comment}. The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}. The data type is the same as x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The encoding schema is described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The decoding schema is described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
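As a quick illustration of the encoding schema, for a prior box centered at
(px, py) = (5, 5) with pw = ph = 10 and variances pxv = pyv = pwv = phv = 0.1,
and a target box centered at (6, 6) with tw = th = 20, the encoded values are
ox = oy = (6 - 5) / 10 / 0.1 = 1.0 and ow = oh = log(20 / 10) / 0.1, which is
approximately 6.93.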
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. The first is a Variable with shape [M, 4] which holds M groups of
variances, with data type float32 or float64. The second is a list consisting of
4 elements shared by all boxes, with data type float32 or float64.
The third is None, in which case no variance is involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or list")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
gt_box (Variable): ground-truth boxes, should be in shape of [N, B, 4].
In the third dimension, x, y, w, h are stored:
x, y are the center coordinates of the boxes, w, h are the
width and height, and x, y, w, h should be divided by the
input image height to scale to [0, 1].
N is the batch size and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class ids of ground-truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
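# The 9 anchors below are given as a flat list of (width, height) pairs;
# anchor_mask selects which of those anchor pairs this prediction layer
# is responsible for.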
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr conf_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
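# If out_states is provided, write the accumulated statistics into the given
# variables so that mAP can be accumulated across mini-batches; otherwise
# create fresh output variables.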
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distances.
Simply put, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
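For example, for a single instance with the 2 x 3 distance matrix
[[0.8, 0.2, 0.5], [0.6, 0.9, 0.1]], the greedy matching first picks the largest
value 0.9 and matches column 1 to row 1, then matches column 0 to row 0 with
distance 0.8, and leaves column 2 unmatched, so ColToRowMatchIndices is
[0, 1, -1].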
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which predictions
do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th column entity is not matched to any row entity in the
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
as `input`. N and P are the same as they are in `matched_indices`,
and K is the same as it is in `input`.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss,
with shape [N * Np, 1]. N and Np are the same as they are in
`location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example'; currently only the mining
type `max_negative` is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding boxes by the bipartite matching algorithm.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
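# Recompute the confidence loss now that target_label also marks the mined
# hard negatives as background; the earlier conf_loss was only used to rank
# candidates for hard example mining.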
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the (min_size, max_size) interval, and boxes are generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box steps across the
height or width of the input will be calculated automatically.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
in the order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note that this order affects the weight order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
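As a rough sanity check (illustrative only, not part of the original op notes, and assuming no duplicate aspect ratios after flipping), the prior count per position is len(min_sizes) * len(aspect_ratios) plus one extra box per max_size, which matches the shapes printed above:
.. code-block:: python
# illustrative arithmetic only; these names are plain Python values, not op inputs
min_sizes = [100.]
aspect_ratios = [1.] # the default; flipping 1. changes nothing
max_sizes = [] # not set in the example above
num_priors = len(min_sizes) * len(aspect_ratios) + len(max_sizes)
print(num_priors) # 1, the third dimension of the (6, 9, 1, 4) outputs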
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple '
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must equal the number of densities.
For each densities_i in densities:
.. math::
N\_density\_prior\_box = \sum_i (N\_fixed\_ratios \times densities\_i^2)
N\_density\_prior\_box is the number of density prior boxes and N\_fixed\_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should be a list or tuple of the same
length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set while :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box steps across the
height or width of the input will be calculated automatically.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
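A small numeric check of the formula above (illustrative only): with densities=[4, 2, 1] and a single fixed ratio, each position gets 1 * (4^2 + 2^2 + 1^2) = 21 density prior boxes, which matches the shapes printed above:
.. code-block:: python
# illustrative arithmetic only
densities = [4, 2, 1]
fixed_ratios = [1.]
num_priors = sum(len(fixed_ratios) * d ** 2 for d in densities)
print(num_priors) # 21, as in the [6L, 9L, 21L, 4L] shape
print(6 * 9 * num_priors) # 1134 rows when flatten_to_2d=True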
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple '
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, this op generates prior boxes,
regression locations and classification confidences on multiple input feature
maps, then concatenates and outputs the results. For details of the algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_sizes` and `max_sizes` are None, `min_sizes` and `max_sizes`
are calculated from `base_size`, `min_ratio` and `max_ratio`. A worked
numeric example is given after the code examples below. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
in the order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note that this order affects the weight order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
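The worked numeric sketch below (illustrative only) follows the base_size formula literally for the six inputs of Example 1, i.e. base_size=300, min_ratio=20 and max_ratio=90:
.. code-block:: python
# illustrative arithmetic only, mirroring the formula in the base_size docs
import math
base_size, min_ratio, max_ratio, num_layer = 300, 20, 90, 6
min_sizes, max_sizes = [], []
step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2))) # 17
for ratio in range(min_ratio, max_ratio + 1, step): # 20, 37, 54, 71, 88
    min_sizes.append(base_size * ratio / 100.)
    max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes # [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
max_sizes = [base_size * .20] + max_sizes # [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]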
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'steps should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for the Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors
are ordered by looping over aspect_ratios first and then anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, an anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
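A quick count check for the example above (illustrative only): each position of conv1 yields size(anchor_sizes) * size(aspect_ratios) anchors, so the anchor output is laid out as [16, 16, 12, 4] for the 16 x 16 feature map:
.. code-block:: python
# illustrative arithmetic only
anchor_sizes = [64, 128, 256, 512]
aspect_ratios = [0.5, 1.0, 2.0]
num_anchors = len(anchor_sizes) * len(aspect_ratios)
print(num_anchors) # 12 anchors per feature-map position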
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple '
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) are the
top-left coordinates, (x2, y2) are the top-right
coordinates, (x3, y3) are the bottom-right coordinates,
and (x4, y4) are the bottom-left coordinates. The data type is the
same as `input`.
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
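The output shapes for the example above follow directly from the documented layouts; the sketch below is purely illustrative and assumes a hypothetical count of 10 RoIs:
.. code-block:: python
# illustrative shape arithmetic only; num_rois is a hypothetical value
num_rois, channels, transformed_h, transformed_w = 10, 256, 7, 7
out_shape = (num_rois, channels, transformed_h, transformed_w) # (10, 256, 7, 7)
mask_shape = (num_rois, 1, transformed_h, transformed_w) # (10, 1, 7, 7)
transform_matrix_shape = (num_rois, 9) # one flattened 3 x 3 perspective matrix per RoI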
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes produced by GenerateProposalOp and the ground truth, this operator
samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposal_op. These boxes
are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a ground-truth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a ground-truth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in the RoIs, we assign a classification target (class label) and regression targets (box label).
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether each box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): whether box regression is class-agnostic, i.e. it only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): if set to True, boxes crossing the image boundary are filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
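A small numeric note on the sampling budget described above (illustrative only): with the default batch_size_per_im and fg_fraction, at most batch_size_per_im * fg_fraction RoIs per image are foreground, the rest being background:
.. code-block:: python
# illustrative arithmetic only
batch_size_per_im = 256
fg_fraction = 0.25
max_fg_rois_per_im = int(batch_size_per_im * fg_fraction)
print(max_fg_rois_per_im) # 64 foreground RoIs at most per image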
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator
samples foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentations; assume the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
float32 data type, and its LoD level is 3.
Usually users do not need to understand LoD;
they only need to return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the polygon count of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
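A quick check of the mask target size in the example above (illustrative only): each foreground RoI receives a K * M * M dimensional target, so mask_int32 has num_classes * resolution * resolution columns:
.. code-block:: python
# illustrative arithmetic only
num_classes = 81 # K
resolution = 14 # M
mask_target_dim = num_classes * resolution * resolution
print(mask_target_dim) # 15876, the second dimension of mask_int32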
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposals for Faster-RCNN**
This operation proposes RoIs according to each box's
probability of being a foreground object; the
boxes are computed from the anchors. The bbox_deltas and objectness scores
are the output of the RPN. The final proposals
can be used to train the detection net.
For generating proposals, this operation performs the following steps:
1. Transpose and resize scores and bbox_deltas to shapes
(H*W*A, 1) and (H*W*A, 4).
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get the final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are the height and width of the feature map,
A is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and is unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(int): Number of total bboxes to be kept per
image before NMS. `6000` by default.
post_nms_top_n(int): Number of total bboxes to be kept per
image after NMS. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing the number
of RoIs of each image in the batch, where N is the number of images. For example, the value [4, 5] means
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
False by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
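A small numeric note on step 1 above (illustrative only): for the example shapes, each image yields H * W * A candidate boxes before clipping, filtering and NMS:
.. code-block:: python
# illustrative arithmetic only
A, H, W = 4, 5, 5 # anchors per position, feature-map height and width
num_candidates = H * W * A
print(num_candidates) # 100 candidates per image, well below pre_nms_top_n=6000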
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes to the size given by im_info.
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
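A worked numeric instance of the clipping formula above (illustrative values, not from the op documentation):
.. code-block:: python
# illustrative arithmetic only, mirroring the text formula above
im_h, im_w = 240, 320 # round(height / scale), round(width / scale)
xmin, ymin, xmax, ymax = -5.0, 10.0, 350.0, 250.0
xmin = max(min(xmin, im_w - 1), 0) # 0.0
ymin = max(min(ymin, im_h - 1), 0) # 10.0
xmax = max(min(xmax, im_w - 1), 0) # 319.0
ymax = max(min(ymax, im_h - 1), 0) # 239.0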
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions; this OP obtains the detection results by
performing the following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is 1.0, which means :attr:`nms_threshold` stays
the same during NMS. If :attr:`nms_eta` is set
to be lower than 1.0 and the value of :attr:`nms_threshold` is set to
be higher than 0.5, then every time a bounding box is filtered out,
the adjustment :attr:`nms_threshold` = :attr:`nms_threshold` * :attr:`nms_eta`
is applied, until the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it is possible
that there are no detections if :attr:`score_threshold` is applied at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': 1.,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator performs multi-class non-maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes whose scores are larger than score_threshold, if this
threshold is provided, and then keeps the largest nms_top_k confidence scores if nms_top_k
is larger than -1. It then prunes away boxes that have a high IOU
(intersection over union) overlap with already selected boxes, using adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k of the total bboxes are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4, 8, 16, 24 or 32] represents the
predicted locations of M bounding boxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax] when the box size equals 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
the number of bounding boxes. For each category there
are in total M scores corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS, it works only when the value is less than 1.0. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
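# Hedged illustration only (the helper below is hypothetical and not part of
# this module's public API): a tiny pure-Python IoU computation that reproduces
# the arithmetic of the multiclass_nms docstring example above, where
# box1 = (2, 3, 7, 5) and box2 = (3, 4, 8, 5) overlap with iou = 4 / 11 > 0.3.
def _iou_example_sketch():
    def _iou(a, b):
        # Intersection of two (xmin, ymin, xmax, ymax) boxes.
        iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = iw * ih
        area_a = (a[2] - a[0]) * (a[3] - a[1])
        area_b = (b[2] - b[0]) * (b[3] - b[1])
        return inter / (area_a + area_b - inter)
    # intersection area = 4 * 1 = 4, union = 10 + 5 - 4 = 11, iou = 4 / 11
    return _iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0))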
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Local Aware NMS**
`Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
suppression (LANMS) on boxes and scores.
    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, it greedily selects a subset of
    detection bounding boxes whose scores are larger than score_threshold (if this
    threshold is provided), and then keeps the nms_top_k boxes with the largest
    confidence scores if nms_top_k is larger than -1. It then prunes away boxes
    that have a high IoU overlap with already selected boxes, using
    adaptive-threshold NMS controlled by nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes are kept per image if
    keep_top_k is larger than -1.
Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 or 16 or 24 or 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
                           boxes. Now only supports 1 class. For each category
                           there are M scores in total, corresponding to the M
                           bounding boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS, it works only when the value is less than 1.0. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
outputs={'Out': output})
output.stop_gradient = True
return output
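# Hedged sketch (the helper below is hypothetical and not part of this
# module's API): locality-aware NMS first merges neighbouring boxes; a common
# formulation is a score-weighted average of the coordinates, with the merged
# score being the sum of the two scores. Illustration only.
def _weighted_merge_sketch(box_a, score_a, box_b, score_b):
    total = score_a + score_b
    merged = [(score_a * ca + score_b * cb) / total
              for ca, cb in zip(box_a, box_b)]
    return merged, total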
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top nms_top_k candidates are
    selected if nms_top_k is larger than -1. The scores of the remaining candidates
    are then decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes are kept per image if
    keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
                           are M scores in total, corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
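# Hedged sketch of the decay step referenced by `use_gaussian` and
# `gaussian_sigma` above. The operator's exact decay is implemented in C++;
# the hypothetical helper below only illustrates the usual linear / Gaussian
# forms, where `iou` is the overlap with a higher-scored box and `max_iou` is
# that box's own largest overlap with any earlier, higher-scored box.
def _matrix_nms_decay_sketch(iou, max_iou, use_gaussian=False, sigma=2.0):
    import math
    if use_gaussian:
        return math.exp(-(iou ** 2 - max_iou ** 2) / sigma)
    return (1.0 - iou) / (1.0 - max_iou)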
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN levels
    according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of proposals, we return an
    array which indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
        and data type of float32 or float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
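# Hedged sketch (hypothetical helper, not used by the op): the level formula
# from the distribute_fpn_proposals docstring applied to a single RoI given as
# [xmin, ymin, xmax, ymax]. The base-2 logarithm follows the FPN paper; treat
# that choice, and the clipping, as assumptions made for illustration only.
def _fpn_level_sketch(roi, min_level, max_level, refer_level, refer_scale):
    import math
    w = roi[2] - roi[0]
    h = roi[3] - roi[1]
    roi_scale = math.sqrt(max(w * h, 1e-6))
    level = int(math.floor(math.log(roi_scale / refer_scale, 2) + refer_level))
    return min(max(level, min_level), max_level)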
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
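# Hedged, pure-Python sketch of steps 2-4 in the collect_fpn_proposals
# docstring (concat the levels, sort by score, keep post_nms_top_n). The
# helper name is hypothetical; the real op also restores the per-image batch
# order, which is omitted here for brevity.
def _collect_top_rois_sketch(multi_rois, multi_scores, post_nms_top_n):
    flat = []
    for rois, scores in zip(multi_rois, multi_scores):
        flat.extend(zip(scores, rois))
    flat.sort(key=lambda item: item[0], reverse=True)
    return [roi for _, roi in flat[:post_nms_top_n]]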
|
iou_similarity
|
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
        x (Variable): ${x_comment}. The data type is float32 or float64.
        y (Variable): ${y_comment}. The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
Set true by default.
Returns:
        Variable: ${out_comment}. The data type is the same as x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.
    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.
    3. An anchor is assigned to background when its IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
            location prediction belonging to positive samples. :math:`F` is the
            number of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
            represents whether a positive sample is a fake positive; if a positive
            sample is a fake positive, the corresponding entries in
            :attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
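# Hedged sketch of the length-C classification target described in the
# retinanet_target_assign docstring (hypothetical helper, illustration only):
# a positive sample assigned to class `i` (1-based, background excluded) gets
# a one-hot vector with the i-th entry set to 1; a negative sample gets an
# all-zero vector.
def _retinanet_cls_target_sketch(assigned_class, num_classes):
    target = [0] * num_classes
    if assigned_class is not None and 1 <= assigned_class <= num_classes:
        target[assigned_class - 1] = 1
    return target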
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
    Given the Intersection-over-Union (IoU) overlap between anchors and ground
    truth boxes, this layer assigns classification and regression targets to
    each anchor; these targets are used to train the RPN. The classification
    target is a binary class label (of being an object or not). Following the
    Faster-RCNN paper, positive labels are assigned to two kinds of anchors:
    (i) the anchor/anchors with the highest IoU overlap with a ground-truth box,
    or (ii) an anchor that has an IoU overlap higher than
    rpn_positive_overlap (0.7) with any ground-truth box. Note that a single
    ground-truth box may assign positive labels to multiple anchors. An anchor
    is assigned a negative label when its IoU ratio is lower than
    rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
    neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
            foreground/background sigmoid score channel, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
            The target_label and target_bbox are the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
            anchors; F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
        is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
        im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
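# Hedged sketch (hypothetical helper) of the anchor labelling rules quoted in
# the rpn_target_assign docstring, given the list of IoUs of one anchor
# against all gt boxes: returns 1 for positive, 0 for negative, -1 for
# ignored. Rule (i), "the anchor with the highest IoU for some gt box", is
# passed in by the caller as `is_best_for_some_gt`; only the threshold rules
# are spelled out here.
def _rpn_anchor_label_sketch(ious_with_gt, is_best_for_some_gt,
                             pos_overlap=0.7, neg_overlap=0.3):
    max_iou = max(ious_with_gt) if ious_with_gt else 0.0
    if is_best_for_some_gt or max_iou > pos_overlap:
        return 1
    if max_iou < neg_overlap:
        return 0
    return -1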
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
    The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
            is the total number of positive and negative samples in a mini-batch; samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
                # Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
                # find the elements in label which are greater than or equal to 1, then
                # compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
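# Hedged, per-element sketch of the focal-loss formula in the docstring above
# (hypothetical helper, pure Python, no Paddle ops). `x` is the logit for
# class index `j` (0-based), `label` is the 1-based target class (0 meaning a
# negative sample), and `fg_num` is the positive-sample count.
def _sigmoid_focal_loss_elem_sketch(x, j, label, fg_num, gamma=2.0, alpha=0.25):
    import math
    p = 1.0 / (1.0 + math.exp(-x))
    if (j + 1) == label:
        return -alpha * ((1.0 - p) ** gamma) * math.log(p) / fg_num
    return -(1.0 - alpha) * (p ** gamma) * math.log(1.0 - p) / fg_num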
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection outputs is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
as Out. If the index is used to gather other attribute such as age,
            one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
                'background_label': background_label,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
                'background_label': background_label,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
# MASKED: iou_similarity function (lines 761-812)
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
        ox = (pw * pxv * tx + px) - tw / 2
        oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
            data type is float32 or float64. The second is a list consisting of
            4 elements shared by all boxes, with data type float32 or float64.
            The third is None, which means the variance is not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
        raise TypeError("Input variance of box_coder must be Variable or list")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
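# Hedged sketch of the encoding schema written in the box_coder docstring
# (hypothetical helper, illustration only). Both boxes are given as
# [xmin, ymin, xmax, ymax]; they are converted to center/size form before the
# formulas are applied, and `variance` holds [pxv, pyv, pwv, phv].
def _box_encode_sketch(target_box, prior_box, variance):
    import math
    def _center_size(b):
        w = b[2] - b[0]
        h = b[3] - b[1]
        return b[0] + w / 2.0, b[1] + h / 2.0, w, h
    tx, ty, tw, th = _center_size(target_box)
    px, py, pw, ph = _center_size(prior_box)
    pxv, pyv, pwv, phv = variance
    ox = (tx - px) / pw / pxv
    oy = (ty - py) / ph / pyv
    ow = math.log(abs(tw / pw)) / pwv
    oh = math.log(abs(th / ph)) / phv
    return [ox, oy, ow, oh]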
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x, y are the center coordinates of the boxes, w, h are the
width and height, and x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
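As described for gt_box above, ground-truth boxes are fed as normalized
[x_center, y_center, w, h]; a minimal NumPy sketch of preparing such data
from pixel-space [xmin, ymin, xmax, ymax] boxes, assuming a square 416x416
input (illustrative only):
.. code-block:: python
import numpy as np
img_size = 416.  # 13 * downsample_ratio, assuming a square input
boxes_xyxy = np.array([[[48., 240., 195., 371.]]], dtype='float32')  # [N, B, 4]
x_c = (boxes_xyxy[..., 0] + boxes_xyxy[..., 2]) / 2. / img_size
y_c = (boxes_xyxy[..., 1] + boxes_xyxy[..., 3]) / 2. / img_size
w = (boxes_xyxy[..., 2] - boxes_xyxy[..., 0]) / img_size
h = (boxes_xyxy[..., 3] - boxes_xyxy[..., 1]) / img_size
gt_box_data = np.stack([x_c, y_c, w, h], axis=-1)  # feed data for 'gt_box'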
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
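A common follow-up (not part of this op) is to run multi-class NMS on the
decoded boxes; a rough, self-contained sketch of that pattern, with
illustrative parameter values:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x_det', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size_det', shape=[None, 2], dtype='int64')
boxes, scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80,
                                      anchors=[10, 13, 16, 30, 33, 23],
                                      conf_thresh=0.01, downsample_ratio=32)
# multiclass_nms expects scores laid out as [N, class_num, M]
scores = fluid.layers.transpose(scores, perm=[0, 2, 1])
pred = fluid.layers.multiclass_nms(bboxes=boxes, scores=scores,
                                   score_threshold=0.01, nms_top_k=1000,
                                   keep_top_k=100, nms_threshold=0.45,
                                   normalized=False, background_label=-1)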
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, it contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2-D matrix, the bipartite matching algorithm
can find the matched column for each row (matched means the largest
distance), and can also find the matched row for each column. This operator
only calculates matched indices from column to row. For each instance,
the number of matched indices equals the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs: matched indices and matched distance.
In short, this algorithm greedily matches the best (maximum distance)
row entity to each column entity, and the matched row indices are not
duplicated within each row of ColToRowMatchIndices. If a column entity is
not matched to any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be a LoDTensor (with LoD) or a Tensor.
If it is a LoDTensor with LoD, the height of ColToRowMatchIndices is the batch size.
If it is a Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider using :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
property. None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
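For intuition, a pure NumPy sketch of the greedy maximum-distance matching
described above, for a single instance without LoD (illustrative only, not
the operator implementation):
.. code-block:: python
import numpy as np
def greedy_bipartite_match(dist):
    # dist: [K, M] pairwise distances; returns col-to-row indices,
    # where -1 marks a column that ends up unmatched.
    dist = dist.astype('float64').copy()
    K, M = dist.shape
    col_to_row = np.full(M, -1, dtype='int64')
    for _ in range(min(K, M)):
        r, c = np.unravel_index(np.argmax(dist), dist.shape)
        col_to_row[c] = r
        dist[r, :] = -np.inf
        dist[:, c] = -np.inf
    return col_to_row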
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which
predictions do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] >= 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2-D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
the i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and same data type
with `input`, N and P is the same as they are in `matched_indices`,
K is the same as it in input of X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float32',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
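A minimal NumPy sketch of assignment step 1 above, ignoring LoD offsets and
negative indices (illustrative only):
.. code-block:: python
import numpy as np
def assign_targets(x, match_indices, mismatch_value=0.):
    # x: [M, P, K], match_indices: [N, P]
    N, P = match_indices.shape
    K = x.shape[-1]
    out = np.full((N, P, K), mismatch_value, dtype=x.dtype)
    out_weight = np.zeros((N, P, 1), dtype='float32')
    for i in range(N):
        for j in range(P):
            idx = match_indices[i, j]
            if idx >= 0:
                out[i, j, :] = x[idx, j, :]
                out_weight[i, j] = 1.
    return out, out_weight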
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number. The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss,
with shape [N * Np, 1], N and Np are the same as they are in
`location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', which is not supported yet;
only the `max_negative` mining type is currently supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box is within the (min_size, max_size) interval, and the boxes are generated
in sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across the
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and the count
of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must equal the number of densities.
For each densities_i in densities:
.. math::
N\_density\_prior\_box = \sum(N\_fixed\_ratios \times densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes. If this attribute is not set while :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes. Default: None.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step across the
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
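The box count per position follows the formula above; for the configuration
used in these examples it can be checked directly (plain Python, for
illustration):
.. code-block:: python
densities = [4, 2, 1]
fixed_ratios = [1.]
num_priors = sum(len(fixed_ratios) * d ** 2 for d in densities)
# num_priors == 21, so the flattened output has 6 * 9 * 21 == 1134 rows.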
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_size` and `max_size` are None, `min_size` and `max_size`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows (expanded numerically in the sketch after the examples):
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
have the same meaning as above. C is the number of classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
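For Example 1 above, the min_ratio/max_ratio formula quoted in the base_size
description expands as follows (plain Python mirroring that pseudo-code, for
illustration):
.. code-block:: python
import math
import six
base_size, min_ratio, max_ratio, num_layer = 300, 20, 90, 6
min_sizes, max_sizes = [], []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
    min_sizes.append(base_size * ratio / 100.)
    max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
# min_sizes -> [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
# max_sizes -> [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]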
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for the Faster RCNN algorithm.
Each position of the input produces N anchors, N =
size(anchor_sizes) * size(aspect_ratios). The order of generated anchors
is: first loop over aspect_ratios, then loop over anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
property. None by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
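For the example above, each position yields size(anchor_sizes) *
size(aspect_ratios) anchors (plain Python, for illustration):
.. code-block:: python
anchor_sizes = [64, 128, 256, 512]
aspect_ratios = [0.5, 1.0, 2.0]
num_anchors = len(anchor_sizes) * len(aspect_ratios)
# num_anchors == 12, so for conv1 the anchors have layout [16, 16, 12, 4].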
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. A perspective transform is represented by a 3x3 transformation matrix.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes produced by GenerateProposalOp and the ground truth,
this operator samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposal_op. These boxes
are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a ground-truth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a ground-truth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in the RoIs, the classification target (class label) and regression target (box label) are assigned to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): If True, bbox regression is class-agnostic, i.e. it only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): If True, boxes crossing the image boundary will be filtered out.
Returns:
tuple:
A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation; assume the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
# `batch_segms` is assumed to be the nested polygon list shown above, and
# `feeds` the list of feed variables defined in the program.
batch_masks = []
for segms in batch_segms:
    gt_masks = []
    for segm in segms:
        gt_segm = []
        for polys in segm:
            # each polygon is a flat [x1, y1, x2, y2, ...] list; reshape to (-1, 2) points
            gt_segm.append(np.array(polys).reshape(-1, 2))
        gt_masks.append(gt_segm)
    batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type; its LoD level is 3.
Usually users do not need to understand LoD;
they only need to return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the number of segmentations of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's
probability of being a foreground object;
the boxes are computed from the anchors. The bbox_deltas and the scores
of being an object are the output of the RPN. The final proposals
can be used to train the detection net.
To generate proposals, this operation performs the following steps
(a minimal sketch of the box decoding in step 2 is given after the list):
1. Transpose and resize scores and bbox_deltas to shapes
(H*W*A, 1) and (H*W*A, 4).
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get the final proposals as output.
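A minimal NumPy sketch of the step-2 box decoding, assuming the common center/size delta parameterization and ignoring variances; the operator's exact conventions (e.g. width/height offsets) may differ, and `decode_boxes` is a name chosen only for this illustration.
.. code-block:: python

    import numpy as np

    def decode_boxes(anchors, deltas):
        # anchors, deltas: [K, 4] arrays; anchors are (xmin, ymin, xmax, ymax)
        aw = anchors[:, 2] - anchors[:, 0]
        ah = anchors[:, 3] - anchors[:, 1]
        acx = anchors[:, 0] + 0.5 * aw
        acy = anchors[:, 1] + 0.5 * ah
        dx, dy, dw, dh = deltas[:, 0], deltas[:, 1], deltas[:, 2], deltas[:, 3]
        cx, cy = dx * aw + acx, dy * ah + acy
        w, h = np.exp(dw) * aw, np.exp(dh) * ah
        return np.stack([cx - 0.5 * w, cy - 0.5 * h,
                         cx + 0.5 * w, cy + 0.5 * h], axis=1)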
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor representing the anchors with a layout
of [H, W, A, 4]. H and W are the height and width of the feature map,
A is the number of anchors at each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(int): Number of total bboxes to be kept per
image before NMS. `6000` by default.
post_nms_top_n(int): Number of total bboxes to be kept per
image after NMS. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Used in adaptive NMS: if the adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): Whether to additionally return a 1-D Tensor with shape [N, ] containing the number of RoIs
of each image in the batch, where N is the number of images. For example, the value [4, 5] means
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in RCNN models.
`False` by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes to the size given by im_info.
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
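An illustrative NumPy version of the clipping formula above; a sketch only, not the op itself (which runs on graph tensors), and `clip_boxes` is a name used just for this example.
.. code-block:: python

    import numpy as np

    def clip_boxes(boxes, im_h, im_w):
        # boxes: [..., 4] in (xmin, ymin, xmax, ymax) order
        boxes = boxes.copy()
        boxes[..., 0::2] = np.clip(boxes[..., 0::2], 0, im_w - 1)  # xmin, xmax
        boxes[..., 1::2] = np.clip(boxes[..., 1::2], 0, im_h - 1)  # ymin, ymax
        return boxes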
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set;
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1, 3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions; this OP obtains the detection results by
performing the following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it is possible
that there are no detections if :attr:`score_threshold` is used at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS. Consequently, the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes whose scores are larger than score_threshold, if this threshold is
provided, and then keeps the nms_top_k boxes with the largest confidence scores if nms_top_k
is larger than -1. Then this operator prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes, using adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
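A small plain-Python check of the IoU value used in the example above (illustrative only; `iou` is a helper defined just for this check):
.. code-block:: python

    def iou(a, b):
        # a, b: (xmin, ymin, xmax, ymax)
        ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = ix * iy
        area_a = (a[2] - a[0]) * (a[3] - a[1])
        area_b = (b[2] - b[0]) * (b[3] - b[1])
        return inter / (area_a + area_b - inter)

    print(iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0)))  # 4/11 ~= 0.364 > 0.3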
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4, 8, 16, 24 or 32] represents the
predicted locations of M bounding boxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality-Aware NMS**
`Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
Firstly, this operator merges boxes and scores according to their IoU
(intersection over union); an illustrative sketch of the merge is given after this description.
In the NMS step, this operator greedily selects a
subset of detection bounding boxes whose scores are larger than score_threshold,
if this threshold is provided, and then keeps the nms_top_k boxes with the largest confidence scores
if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
IoU overlap with already selected boxes, using adaptive threshold NMS based on the parameters
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
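A minimal sketch of the locality-aware merging step, assuming the score-weighted box averaging used in the EAST paper; this is illustrative only and `weighted_merge` is not the operator's API.
.. code-block:: python

    import numpy as np

    def weighted_merge(box_a, score_a, box_b, score_b):
        # merge two overlapping boxes by averaging coordinates weighted by score;
        # the merged score accumulates both scores
        merged = (score_a * np.asarray(box_a) + score_b * np.asarray(box_b)) / (score_a + score_b)
        return merged, score_a + score_b

    box, score = weighted_merge((2.0, 3.0, 7.0, 5.0), 0.7, (3.0, 4.0, 8.0, 5.0), 0.3)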
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then the top-k candidates are selected if
nms_top_k is larger than -1. The scores of the remaining candidates are then
decayed according to the Matrix NMS scheme; an illustrative sketch of the decay is given after this description.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
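A minimal NumPy sketch of the Matrix NMS decay idea (linear or Gaussian); the operator's kernel may differ in details such as the sigma convention, and `matrix_nms_decay` is a name used only for this illustration.
.. code-block:: python

    import numpy as np

    def matrix_nms_decay(ious, scores, use_gaussian=False, gaussian_sigma=2.0):
        # ious: [M, M] pairwise IoU of boxes already sorted by descending score
        # scores: [M]; returns the decayed scores
        n = len(scores)
        iou = np.triu(ious, k=1)              # IoU of each box with higher-scored boxes
        comp = iou.max(axis=0)                # strongest suppression each higher box received
        comp = np.tile(comp[:, None], (1, n))
        if use_gaussian:
            decay = np.exp(-gaussian_sigma * (iou ** 2 - comp ** 2))
        else:
            decay = (1.0 - iou) / (1.0 - comp)   # assumes IoU < 1 between distinct boxes
        return scores * decay.min(axis=0)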
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of the rois in the current proposals.
The FPN level of each roi is computed by the following formula:
.. math::
    roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\\\
    level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
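A minimal NumPy sketch of this level assignment, assuming a log base of 2 as is common for FPN and a small epsilon for numerical safety; the op's exact numerics may differ, and `fpn_level` is a name used only here.
.. code-block:: python

    import numpy as np

    def fpn_level(rois, min_level=2, max_level=5, refer_level=4, refer_scale=224):
        # rois: [N, 4] in (xmin, ymin, xmax, ymax) order
        w = rois[:, 2] - rois[:, 0]
        h = rois[:, 3] - rois[:, 1]
        roi_scale = np.sqrt(w * h)
        level = np.floor(np.log2(roi_scale / refer_scale + 1e-6) + refer_level)
        return np.clip(level, min_level, max_level).astype(np.int32)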
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set;
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
and data type float32 or float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set;
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps (a minimal sketch is given after the list):
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select the post_nms_top_n highest scores
4. Gather RoIs by the selected indices from the scores
5. Re-sort RoIs by the corresponding batch_id
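A minimal NumPy sketch of the selection logic for a single image (LoD/batch bookkeeping omitted); illustrative only, with `collect_top_rois` being a name chosen for this example.
.. code-block:: python

    import numpy as np

    def collect_top_rois(multi_rois, multi_scores, post_nms_top_n):
        # multi_rois: list of [Ni, 4] arrays; multi_scores: list of [Ni, 1] arrays
        rois = np.concatenate(multi_rois, axis=0)
        scores = np.concatenate(multi_scores, axis=0).reshape(-1)
        keep = np.argsort(-scores)[:post_nms_top_n]
        return rois[keep]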
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually the name does not need to be set;
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}. The data type is float32 or float64.
y (Variable): ${y_comment}. The data type is float32 or float64.
box_normalized(bool): Whether to treat the prior box as a normalized box.
Set to True by default.
Returns:
Variable: ${out_comment}. The data type is the same as x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as follows
(an illustrative sketch is given after this list):
1. An anchor is assigned to a ground-truth box when it has the highest IoU
overlap with that ground-truth box.
2. An anchor is assigned to a ground-truth box when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. An anchor is assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
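A minimal NumPy sketch of rules 2 and 3 above (rule 1, which force-matches the best anchor for each ground-truth box, is omitted for brevity); illustrative only, and `label_anchors` is not part of the API.
.. code-block:: python

    import numpy as np

    def label_anchors(ious, positive_overlap=0.5, negative_overlap=0.4):
        # ious: [num_anchors, num_gt] IoU matrix
        # returns 1 (positive), 0 (negative) or -1 (ignored) per anchor
        labels = np.full(ious.shape[0], -1, dtype=np.int32)
        max_iou = ious.max(axis=1)
        labels[max_iou < negative_overlap] = 0        # rule 3
        labels[max_iou >= positive_overlap] = 1       # rule 2
        return labels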
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0, and its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is a fake positive; if a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
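    # Flatten the predictions and gather the entries selected by the target
    # assignment: class scores for both positive and negative samples
    # (score_index), box predictions for positive samples only (loc_index).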
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and
ground-truth boxes, this layer assigns classification and regression
targets to each anchor; these targets are used to train the RPN. The
classification target is a binary class label (of being an object or not).
Following the Faster-RCNN paper, two kinds of anchors are labeled positive:
(i) the anchor/anchors with the highest IoU overlap with a ground-truth box,
or (ii) an anchor that has an IoU overlap higher than
rpn_positive_overlap (0.7) with any ground-truth box. Note that a single
ground-truth box may assign positive labels to multiple anchors. An anchor
is labeled negative when its IoU ratio is lower than rpn_negative_overlap
(0.3) for all ground-truth boxes. Anchors that are neither positive nor
negative do not contribute to the training objective. The regression
targets are the encoded ground-truth boxes associated with the positive
anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 corresponds to
the foreground/background sigmoid score, and M is the number of bounding
boxes. The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth
box is a crowd. The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
example. The data type must be float32.
Returns:
tuple:
A tuple (predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location are the predicted results of the RPN,
and the target_label and target_bbox are the corresponding ground
truth. The predicted_location is a 2-D Tensor with shape
[F, 4], and the shape of target_bbox is the same as the shape of
the predicted_location, where F is the number of foreground
anchors. The predicted_scores is a 2-D Tensor with shape
[F + B, 1], and the shape of target_label is the same as the shape
of the predicted_scores, where B is the number of background
anchors; F and B depend on the inputs of this operator.
Bbox_inside_weight represents whether a predicted location is a fake
foreground and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
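    # Flatten the predictions and gather only the sampled anchors: scores for
    # both foreground and background samples (score_index), locations for
    # foreground samples only (loc_index).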
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which the focal loss is
measured between the sigmoid value and the target label.
The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1) != label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
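As a minimal reference (for illustration only, not the OP itself), the
per-element loss above can be written in NumPy as follows; the helper name
``sigmoid_focal_loss_ref`` is hypothetical:
.. code-block:: python
    import numpy as np
    def sigmoid_focal_loss_ref(x, label, fg_num, gamma=2.0, alpha=0.25):
        # x: [N, C] logits; label: [N, 1] with values in [0, C] (0 = background);
        # fg_num: scalar denominator (already offset as described above).
        p = 1.0 / (1.0 + np.exp(-x))
        loss = np.zeros_like(p)
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                if label[i, 0] == j + 1:  # positive for class j (classes are 1-based)
                    loss[i, j] = -alpha * (1 - p[i, j]) ** gamma * np.log(p[i, j])
                else:
                    loss[i, j] = -(1 - alpha) * p[i, j] ** gamma * np.log(1 - p[i, j])
        return loss / fg_num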
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground;
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in the first dimension are called LoD; the number of offsets is
N + 1, where N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results; if it is 0, the i-th image has no detected results.
Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected indices, which are of integer type.
The indices are absolute indices across the whole batch, and No is the same
number as in Out. If the index is used to gather other attributes such as age,
one needs to first reshape the input of shape (N, M, 1) to (N * M, 1), where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
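    # Turn class scores into probabilities and transpose to [N, C, M], the
    # layout expected by the multiclass NMS op.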
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
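For intuition, a minimal NumPy sketch of the encoding step for a single
(target, prior) pair is shown below. It only illustrates the formulas above;
the helper name ``encode_one`` is hypothetical, and it assumes normalized
boxes so that width = xmax - xmin:
.. code-block:: python
    import numpy as np
    def encode_one(target, prior, prior_var):
        # target, prior: [xmin, ymin, xmax, ymax]; prior_var: [pxv, pyv, pwv, phv]
        tx, ty = (target[0] + target[2]) / 2., (target[1] + target[3]) / 2.
        tw, th = target[2] - target[0], target[3] - target[1]
        px, py = (prior[0] + prior[2]) / 2., (prior[1] + prior[3]) / 2.
        pw, ph = prior[2] - prior[0], prior[3] - prior[1]
        ox = (tx - px) / pw / prior_var[0]
        oy = (ty - py) / ph / prior_var[1]
        ow = np.log(np.abs(tw / pw)) / prior_var[2]
        oh = np.log(np.abs(th / ph)) / prior_var[3]
        return np.array([ox, oy, ow, oh])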
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
data type is float32 or float64. The second is list consist of
4 elements shared by all boxes and data type is float32 or float64.
Other is None and not involved in calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolov_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
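    # When accumulation states are provided (input_states / out_states), reuse
    # them so mAP statistics can be accumulated across mini-batches; otherwise
    # create fresh output state variables.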
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2-D matrix, the bipartite matching algorithm
can find the matched column for each row (matched means the largest
distance), and can also find the matched row for each column. This operator
only calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs: matched indices and distance.
In short, this algorithm greedily matches the row entity with the best
(maximum) distance to each column entity, and the matched indices are not
duplicated in each row of ColToRowMatchIndices. If a column entity is not
matched to any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
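A rough NumPy sketch of the greedy idea for a single instance is shown
below. It is for illustration only; the actual OP is implemented in C++ and
also supports the 'per_prediction' match type, and the helper name
``greedy_bipartite_match`` is hypothetical:
.. code-block:: python
    import numpy as np
    def greedy_bipartite_match(dist):
        # dist: [K, M] distance matrix for one instance (larger = better match).
        dist = dist.astype('float64').copy()
        col_to_row = -np.ones(dist.shape[1], dtype='int32')
        for _ in range(min(dist.shape)):
            # Pick the globally best remaining (row, column) pair.
            r, c = np.unravel_index(np.argmax(dist), dist.shape)
            col_to_row[c] = r
            dist[r, :] = -np.inf  # row r can no longer be matched
            dist[:, c] = -np.inf  # column c can no longer be matched
        return col_to_row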
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which
predictions do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2-D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th column entity is not matched to any row entity in the
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and same data type
with `input`, N and P is the same as they are in `matched_indices`,
K is the same as it in input of X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', now only support mining \
type of `max_negative`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
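    # conf_shape holds the runtime shape of `confidence`; it is sliced later to
    # obtain the actual [N, Np] shape used when reshaping the confidence loss.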
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
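    # In the reshape below, a 0 in the target shape means "copy this dimension
    # from the input", so gt_label gets a trailing dimension: [..., -1, 1].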
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
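    # Optionally normalize by the number of matched (positive) prior boxes,
    # i.e. the sum of the localization target weights.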
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box is in the (min_size, max_size) interval, and boxes are generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across the
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Obviously, the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density prior boxes and N_fixed_ratios is the number of fixed_ratios.
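For example, with densities = [4, 2, 1] and a single fixed ratio (as in the examples below), N = 1*4^2 + 1*2^2 + 1*1^2 = 21 prior boxes per position, which matches the printed output shapes.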
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should be a list or tuple of the same
length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes step across
the width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variables (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_sizes` and `max_sizes` are None, the `min_sizes` and `max_sizes`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
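For example, with six inputs, base_size=300, min_ratio=20 and max_ratio=90 (as in Example 1 below), step is 17 and the formula gives min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].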
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of inputs and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
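# For each input feature map: generate prior boxes, then predict box
# locations and class confidences with separate conv layers.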
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The order of the generated anchors
is: first loop over aspect_ratios, then loop over anchor_sizes.
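For example, in the code below, 4 anchor sizes and 3 aspect ratios produce N = 12 anchors at each position of the feature map.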
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the output bounding boxes of GenerateProposalOp and the groundtruth,
this operator samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposals. These boxes
are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in RoIs, we assign the classification (class label) and regression (box label) targets to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether each box contributes to the training loss.
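With the defaults batch_size_per_im = 256 and fg_fraction = 0.25, for example, at most 256 * 0.25 = 64 foreground RoIs are kept per image.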
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): If True, bbox regression is class-agnostic and only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): If True, some bboxes crossing the image's boundary will be filtered out.
Returns:
tuple:
A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation, assuming the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for segms in batch_segms:
gt_masks = []
for segm in segms:
gt_segm = []
for polys in segm:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type, and its LoD level is 3.
Usually users do not need to understand LoD;
users should return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposals for Faster-RCNN**
This operation proposes RoIs according to each box's
probability of being a foreground object; the boxes
are computed from the anchors. bbox_deltas and the objectness scores
are the outputs of the RPN. The final proposals
can be used to train the detection net.
For generating proposals, this operation performs following steps:
1. Transposes and reshapes scores and bbox_deltas to shapes
(H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposals candidates.
3. Clip boxes to image
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
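For example, with the shapes used in the example below (A = 4, H = W = 5), step 1 produces H*W*A = 100 candidate boxes per image before clipping, filtering and NMS.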
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are the height and width of the feature map,
A is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing the RoI
count of each image in the batch, where N is the number of images. For example, the values [4, 5] mean
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in RCNN models.
'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes into the size given by im_info.
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
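For example, if im_info for an image is [40., 60., 2.], then im_h = 20 and im_w = 30, and an input box [-5., 10., 35., 25.] is clipped to [0., 10., 29., 19.].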
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector consisting of the height and width
of the network input along with the factor scaling the original image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, every time a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it's possible
that there is no detection if :attr:`score_threshold` is used at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes with scores larger than score_threshold, if this
threshold is provided, then selects the largest nms_top_k confidence scores if nms_top_k
is larger than -1. Then this operator prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes by adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
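Here the intersection area of box1 and box2 is 4 and their union is 10 + 5 - 4 = 11, so IoU = 4/11 ≈ 0.36 exceeds nms_threshold and, within each class, only the higher-scoring box is kept: box2 for label 1 (score 0.3) and box1 for label 2 (score 0.4); label 0 is the background label and is ignored.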
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are a total of M scores corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS, used to adjust nms_threshold. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Local Aware NMS**
`Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
suppression (LANMS) on boxes and scores.
Firstly, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, this operator greedily selects a
subset of detection bounding boxes with scores larger than score_threshold,
if this threshold is provided, then selects the largest nms_top_k confidence scores
if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
IoU overlap with already selected boxes by adaptive threshold NMS based on the parameters
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only 1 class is supported. For each category
there are a total of M scores corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS, used to adjust nms_threshold. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
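# Illustrative sketch (not part of the original API): a small NumPy approximation of
# the locality-aware merging step described in the docstring above, where neighbouring
# boxes are fused by score-weighted averaging before the standard NMS pass. The box
# layout [xmin, ymin, xmax, ymax] and the IoU threshold are assumptions of this sketch.
def _locality_aware_merge_sketch(boxes, scores, iou_threshold=0.3):
    """Greedily merge consecutive boxes whose IoU exceeds `iou_threshold`.
    `boxes` is a [M, 4] array-like, `scores` a [M] array-like; returns NumPy arrays."""
    import numpy as np

    def _iou(a, b):
        # Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes.
        iw = max(0., min(a[2], b[2]) - max(a[0], b[0]))
        ih = max(0., min(a[3], b[3]) - max(a[1], b[1]))
        inter = iw * ih
        union = ((a[2] - a[0]) * (a[3] - a[1]) +
                 (b[2] - b[0]) * (b[3] - b[1]) - inter)
        return inter / (union + 1e-10)

    merged_boxes, merged_scores = [], []
    for box, score in zip(np.asarray(boxes, 'float32'), np.asarray(scores, 'float32')):
        if merged_boxes and _iou(merged_boxes[-1], box) > iou_threshold:
            prev_box, prev_score = merged_boxes[-1], merged_scores[-1]
            total = prev_score + score
            # Score-weighted average of the two boxes; the merged score accumulates.
            merged_boxes[-1] = (prev_box * prev_score + box * score) / total
            merged_scores[-1] = total
        else:
            merged_boxes.append(box.copy())
            merged_scores.append(float(score))
    return np.stack(merged_boxes), np.asarray(merged_scores, 'float32')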
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then keeps the top k candidates if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k of the total bboxes are kept per image
    if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
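# Illustrative sketch (an assumption, not the fused kernel above): how Matrix NMS
# decays scores. Every candidate is decayed by its IoU with each higher-scored
# candidate, compensated by how suppressed that candidate itself already is, using
# either a Gaussian or a linear decay function.
def _matrix_nms_decay_sketch(ious, scores, use_gaussian=False, gaussian_sigma=2.0):
    """`ious` is an [M, M] pairwise IoU matrix and `scores` an [M] vector, both for
    boxes already sorted by descending score. Returns the decayed scores."""
    import numpy as np
    scores = np.asarray(scores, 'float32')
    n = scores.shape[0]
    ious = np.triu(np.asarray(ious, 'float32'), k=1)  # keep only (higher, lower) pairs
    cmax = ious.max(axis=0)                           # largest IoU with a higher-scored box
    compensate = np.tile(cmax[:, None], (1, n))       # how suppressed each suppressor is
    if use_gaussian:
        decay = np.exp(-(ious ** 2 - compensate ** 2) / gaussian_sigma)
    else:
        decay = (1.0 - ious) / (1.0 - compensate + 1e-10)
    return scores * decay.min(axis=0)                 # most aggressive decay per candidate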
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN levels
    according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of proposals, we return an
    array which indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
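# Worked example (illustrative; a base-2 logarithm is assumed here, as in the FPN
# paper): with refer_scale=224 and refer_level=4, a 112 x 112 RoI maps to
# floor(log2(112 / 224) + 4) = 3, i.e. one pyramid level below the reference.
def _fpn_level_sketch(roi, min_level=2, max_level=5, refer_level=4, refer_scale=224):
    """`roi` is [xmin, ymin, xmax, ymax]; returns the FPN level the RoI is routed to."""
    import math
    roi_scale = math.sqrt(max(roi[2] - roi[0], 0.) * max(roi[3] - roi[1], 0.))
    level = math.floor(math.log(max(roi_scale, 1e-6) / refer_scale, 2) + refer_level)
    return int(min(max(level, min_level), max_level))  # clamp into [min_level, max_level]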
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
    (Region of Interest) and select post_nms_top_n RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
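# Illustrative sketch (an assumption, single image only, not the op kernel): the
# concat -> sort -> top-k selection that the docstring above describes, using NumPy.
def _collect_fpn_proposals_sketch(multi_rois, multi_scores, post_nms_top_n):
    """`multi_rois` is a list of [Ni, 4] arrays and `multi_scores` a list of [Ni]
    arrays, one pair per FPN level. Returns the selected RoIs and their scores."""
    import numpy as np
    rois = np.concatenate([np.asarray(r, 'float32') for r in multi_rois], axis=0)
    scores = np.concatenate([np.asarray(s, 'float32') for s in multi_scores], axis=0)
    keep = np.argsort(-scores)[:post_nms_top_n]  # indices of the highest scores
    return rois[keep], scores[keep]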
|
polygon_box_transform
|
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
    1. An anchor is assigned to a ground-truth box when it has the highest IoU
       overlap with that ground-truth box.
    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
       higher than :attr:`positive_overlap` with any ground-truth box.
    3. An anchor is assigned to background when its IoU overlap is lower than
       :attr:`negative_overlap` for all ground-truth boxes.
    4. Anchors which do not meet the above conditions do not participate in
       the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
            of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
            represents whether a positive sample is a fake positive; if a positive
            sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
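# Illustrative sketch (not the op above): the two-threshold labelling rule from the
# docstring, applied to a precomputed anchor-vs-ground-truth IoU matrix with NumPy.
def _assign_anchor_labels_sketch(iou, positive_overlap=0.5, negative_overlap=0.4):
    """`iou` has shape [num_anchors, num_gt]. Returns per-anchor labels: 1 positive,
    0 negative (background), -1 ignored during training."""
    import numpy as np
    iou = np.asarray(iou, 'float32')
    labels = np.full((iou.shape[0], ), -1, dtype='int64')
    max_iou = iou.max(axis=1)
    labels[max_iou < negative_overlap] = 0      # background anchors
    labels[max_iou >= positive_overlap] = 1     # positive by overlap threshold
    labels[iou.argmax(axis=0)] = 1              # best anchor for each ground-truth box
    return labels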
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
    Given the Intersection-over-Union (IoU) overlap between anchors and
    ground-truth boxes, this layer assigns classification and regression
    targets to each anchor; these targets are used to train the RPN. The
    classification target is a binary class label (of being an object or not).
    Following the Faster-RCNN paper, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
    anchors. An anchor is labeled negative when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
            foreground and background sigmoid, M is number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
            anchors; F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
        is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists during the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
    The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
                # Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
                # find the elements in label which are greater than or equal to 1,
                # then count them.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
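# Illustrative sketch (an assumption, not the fused op above): the per-element focal
# loss formula from the docstring evaluated with NumPy. `x` holds logits of shape
# [N, C], `label` holds int targets in [0, C] where 0 means background.
def _sigmoid_focal_loss_sketch(x, label, fg_num, gamma=2.0, alpha=0.25):
    import numpy as np
    x = np.asarray(x, 'float64')
    p = 1.0 / (1.0 + np.exp(-x))                              # sigmoid(x)
    n, c = x.shape
    # one_hot[i, j] is True when (j + 1) equals label[i, 0], matching the formula.
    one_hot = np.arange(1, c + 1)[None, :] == np.asarray(label).reshape(n, 1)
    pos = -alpha * (1.0 - p) ** gamma * np.log(p)
    neg = -(1.0 - alpha) * p ** gamma * np.log(1.0 - p)
    return np.where(one_hot, pos, neg) / float(fg_num)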
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection outputs is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
as Out. If the index is used to gather other attribute such as age,
            one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
        ox = (pw * pxv * tx + px) - tw / 2
        oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
data type is float32 or float64. The second is list consist of
4 elements shared by all boxes and data type is float32 or float64.
Other is None and not involved in calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
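# Illustrative sketch (an assumption, scalar version): the 'encode_center_size'
# schema from the docstring above, for one prior box and one target box.
def _encode_center_size_sketch(prior_box, prior_box_var, target_box):
    """All box arguments are [xmin, ymin, xmax, ymax] 4-vectors; `prior_box_var`
    holds the four variances [pxv, pyv, pwv, phv]. Returns [ox, oy, ow, oh]."""
    import math
    pw, ph = prior_box[2] - prior_box[0], prior_box[3] - prior_box[1]
    px, py = prior_box[0] + pw / 2., prior_box[1] + ph / 2.
    tw, th = target_box[2] - target_box[0], target_box[3] - target_box[1]
    tx, ty = target_box[0] + tw / 2., target_box[1] + th / 2.
    pxv, pyv, pwv, phv = prior_box_var
    ox = (tx - px) / pw / pxv
    oy = (ty - py) / ph / pyv
    ow = math.log(abs(tw / pw)) / pwv
    oh = math.log(abs(th / ph)) / phv
    return [ox, oy, ow, oh]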
# MASKED: polygon_box_transform function (lines 967-998)
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image.The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Input img_size of yolo_box must be Variable
TypeError: Attr anchors of yolo_box must be list or tuple
TypeError: Attr class_num of yolo_box must be an integer
TypeError: Attr conf_thresh of yolo_box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
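# boxes has shape [N, M, 4] and scores has shape [N, M, 80] (class_num),
# where M is the number of boxes decoded from x; see the Returns section above.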
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, it contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
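# map_out holds the detection mAP, computed here with the default
# 'integral' AP version.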
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2-D matrix, the bipartite matching algorithm
can find the matched column for each row (matched means the largest
distance), and can also find the matched row for each column. This operator
only calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, its entry in ColToRowMatchIndices is set to -1.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is the batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider using :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assume one entity is A with shape [K] and
another entity is B with shape [M]. dist_matrix[i][j] is the
distance between A[i] and B[j]; the larger the distance, the
better the match. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
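For intuition, here is a minimal NumPy sketch of the greedy column-to-row
matching described above. It only illustrates the matching rule; it is not the
operator implementation, and the helper name and values are made up for the
example:

.. code-block:: python

    import numpy as np

    def greedy_bipartite_match(dist):
        # dist: [K, M] distance matrix; a larger value means a better match.
        K, M = dist.shape
        col_to_row = np.full(M, -1, dtype=np.int64)
        col_dist = np.full(M, -1.0)
        work = dist.astype(np.float64)
        # Greedily take the globally best remaining (row, column) pair,
        # so no row and no column is matched twice.
        for _ in range(min(K, M)):
            r, c = np.unravel_index(np.argmax(work), work.shape)
            col_to_row[c] = r
            col_dist[c] = dist[r, c]
            work[r, :] = -np.inf  # row r is taken
            work[:, c] = -np.inf  # column c is taken
        return col_to_row, col_dist

    iou = np.array([[0.7, 0.2, 0.1],
                    [0.3, 0.6, 0.4]])
    col_to_row, col_dist = greedy_bipartite_match(iou)
    # col_to_row is [0, 1, -1] and col_dist is [0.7, 0.6, -1.0]:
    # column 2 stays unmatched because both rows are already taken.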
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights for each prediction. The weights are used to specify which
predictions do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assuming that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assuming that the i-th instance in `neg_indices` is called `neg_indice`,
for the i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2-D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th column entity is not matched to any row entity in the
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3-D Tensor with shape [N, P, K] and the same data type
as `input`. N and P are the same as they are in `matched_indices`,
and K is the same as in `input`.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
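# With x of shape [4, 20, 4] and matched_id of shape [8, 20], trg has
# shape [8, 20, 4] and trg_weight has shape [8, 20, 1]; see Returns above.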
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax]. The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number. The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of the mini-batch input. The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
the mini-batch input, and each row holds one class label. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only 'max_negative' is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', which is not supported yet;
only 'max_negative' is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
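# loss has shape [N * Np, 1]; see the Returns section above.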
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the interval (min_size, max_size), and the boxes are generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across the
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
in the order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weight order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
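# num_priors is 1 here: a single min_size, aspect ratio 1.0 (whose flip
# adds no new ratio) and no max_sizes.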
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must be equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = \sum(N\_fixed\_ratios \times densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should be a list or tuple of the same
length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set while :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step across the
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
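# Per the formula above, each position produces
# 1 * (4**2 + 2**2 + 1**2) = 21 prior boxes (one fixed_ratio, densities 4, 2, 1);
# the 6 x 9 input has 54 positions, so flatten_to_2d yields 54 * 21 = 1134 rows.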
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_size` and `max_size` are None, the `min_size` and `max_size`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
in the order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weight order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
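# Per the formula shown for base_size above, these arguments give
# step = floor((90 - 20) / (6 - 2)) = 17 and ratios 20, 37, 54, 71, 88, so
# min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and
# max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].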
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'steps should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for the Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors
loop over aspect_ratios first and then over anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, an anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input, and
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
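# anchor and var have shape [16, 16, 12, 4]: conv1 is 16 x 16 and each
# position gets 4 anchor_sizes * 3 aspect_ratios = 12 anchors.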
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes output by GenerateProposalOp and the ground truth, this operator
samples foreground boxes and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposal_op. These boxes
are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a ground-truth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a ground-truth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in RoIs, we assign the classification (class label) and regression targets (box label) to it.
Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): If True, bbox regression is class-agnostic and only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): If True, bboxes crossing the image's boundary will be filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
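# Per the Returns section above, rois is [batch_size_per_im * batch_size, 4],
# labels is [batch_size_per_im * batch_size, 1], and bbox, inside_weights and
# outside_weights are [batch_size_per_im * batch_size, 4 * class_nums].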
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and corresponding labels, this operator
samples foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentations; assume the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type, and its LoD level is 3.
Usually users do not need to understand LoD,
but they should return the correct data format in the reader.
LoD[0] represents the ground-truth object number of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the polygon number of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's probability of
being a foreground object; the boxes are computed from the anchors.
bbox_deltas and the objectness scores are the outputs of the RPN. The
final proposals can be used to train the detection network.
To generate proposals, this operation performs the following steps
(a rough sketch of step 2 follows the list):
1. Transpose and reshape scores and bbox_deltas to sizes of
(H*W*A, 1) and (H*W*A, 4).
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get the final proposals as output.
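As an illustrative NumPy sketch only (not this op's kernel; the +1 offset
and variance conventions below are assumptions), step 2 roughly decodes the
deltas against the anchors:
.. code-block:: python

    import numpy as np

    def decode_deltas(anchors, deltas, variances):
        # anchors/deltas/variances: [M, 4]; anchors in (xmin, ymin, xmax, ymax).
        # Illustrative sketch; the exact offset conventions of the op may differ.
        w = anchors[:, 2] - anchors[:, 0] + 1.0
        h = anchors[:, 3] - anchors[:, 1] + 1.0
        cx = anchors[:, 0] + 0.5 * w
        cy = anchors[:, 1] + 0.5 * h
        pred_cx = variances[:, 0] * deltas[:, 0] * w + cx
        pred_cy = variances[:, 1] * deltas[:, 1] * h + cy
        pred_w = np.exp(variances[:, 2] * deltas[:, 2]) * w
        pred_h = np.exp(variances[:, 3] * deltas[:, 3]) * h
        return np.stack([pred_cx - 0.5 * pred_w, pred_cy - 0.5 * pred_h,
                         pred_cx + 0.5 * pred_w - 1.0,
                         pred_cy + 0.5 * pred_h - 1.0], axis=1)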
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
A is the number of anchors at each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(int): Number of total bboxes to be kept per
image before NMS. `6000` by default.
post_nms_top_n(int): Number of total bboxes to be kept per
image after NMS. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Applied in adaptive NMS: if `adaptive_threshold > 0.5`,
then `adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): Whether to additionally return a 1-D Tensor with shape [N, ] that contains the RoI
number of each image in the batch, where N is the number of images. For example, the value [4, 5] means
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
`False` by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``; if ``return_rois_num`` is True, a 1-D Tensor with the RoI number of each image is also returned.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
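A minimal NumPy sketch of the clipping rule above (illustrative only; the
helper name is made up, and `im_info` rows are assumed to follow the
documented (height, width, scale) layout):
.. code-block:: python

    import numpy as np

    def clip_boxes(boxes, im_info_row):
        # Illustrative sketch of the formula above, not the exact kernel.
        height, width, scale = im_info_row
        im_h = np.round(height / scale)
        im_w = np.round(width / scale)
        boxes[..., 0::2] = np.clip(boxes[..., 0::2], 0, im_w - 1)  # xmin, xmax
        boxes[..., 1::2] = np.clip(boxes[..., 1::2], 0, im_h - 1)  # ymin, ymax
        return boxes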
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to a value lower than 1. and the value of :attr:`nms_threshold` is set to
a value higher than 0.5, then every time a bounding box is filtered out,
the adjustment :attr:`nms_threshold` = :attr:`nms_threshold` * :attr:`nms_eta`
is applied until the actual value of :attr:`nms_threshold` is lower than or
equal to 0.5 (a rough sketch of this rule follows the argument list).
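The adjustment rule for :attr:`nms_eta` can be sketched in plain Python as
follows (illustrative only; the helper names and the simple IoU form are
assumptions, not this op's implementation):
.. code-block:: python

    import numpy as np

    def iou(a, b):
        # Boxes as (xmin, ymin, xmax, ymax); simple unnormalized IoU helper.
        ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = ix * iy
        union = ((a[2] - a[0]) * (a[3] - a[1]) +
                 (b[2] - b[0]) * (b[3] - b[1]) - inter)
        return inter / union if union > 0 else 0.0

    def greedy_nms(boxes, scores, nms_threshold, nms_eta=1.0):
        # Illustrative greedy NMS with the adaptive-threshold rule described above.
        adaptive = nms_threshold
        keep = []
        for i in np.argsort(-scores):
            if all(iou(boxes[i], boxes[j]) <= adaptive for j in keep):
                keep.append(i)
            elif nms_eta < 1.0 and adaptive > 0.5:
                adaptive *= nms_eta  # shrink the threshold after a box is filtered out
        return keep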
**Notice**: In some cases where the image sizes are very small, it is possible
that there are no detections if :attr:`score_threshold` is applied at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes whose scores are larger than score_threshold, if that threshold is
provided, and then keeps the boxes with the largest nms_top_k confidence scores
if nms_top_k is larger than -1. It then prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes, using adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
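The IoU value 4/11 in the example above can be checked with a small, purely
illustrative helper (assuming plain unnormalized coordinates with no +1
offset):
.. code-block:: python

    def iou(a, b):
        # a, b: boxes as (xmin, ymin, xmax, ymax); illustrative helper only.
        ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = ix * iy
        union = ((a[2] - a[0]) * (a[3] - a[1]) +
                 (b[2] - b[0]) * (b[3] - b[1]) - inter)
        return inter / union

    print(iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0)))  # 4/11 ~= 0.364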
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8, 16, 24, 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes.The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there are no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality Aware NMS**
`Locality Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
First, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, it greedily selects a
subset of detection bounding boxes whose scores are larger than score_threshold,
if that threshold is provided, and then keeps the boxes with the largest nms_top_k confidence scores
if nms_top_k is larger than -1. It then prunes away boxes that have a high
IoU overlap with already selected boxes, using adaptive threshold NMS based on the parameters
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8, 16, 24, 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Currently only one class is supported. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there are no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator performs matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then keeps the top nms_top_k candidates if
nms_top_k is larger than -1. The scores of the remaining candidates are then
decayed according to the Matrix NMS scheme (a rough sketch of the decay is
given below).
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
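The linear decay step can be sketched with NumPy roughly as follows
(an assumption-laden illustration of the Matrix NMS idea, not this op's
kernel; the Gaussian variant and its exact sigma parameterization are
omitted here):
.. code-block:: python

    import numpy as np

    def matrix_nms_linear_decay(ious):
        # ious: [M, M] IoU matrix of candidates sorted by descending score.
        # Illustrative sketch of the linear decay idea only.
        m = ious.shape[0]
        iou = np.triu(ious, k=1)                       # IoU with higher-scoring boxes
        compensate = np.tile(iou.max(axis=0)[:, None], (1, m))
        decay = (1.0 - iou) / (1.0 - compensate + 1e-10)  # linear decay
        return decay.min(axis=0)                       # multiply into the scores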
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array that indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::

    roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

    level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
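A minimal NumPy sketch of this level assignment (illustrative only; the
log base 2 convention from the FPN paper, the clamping to
[min_level, max_level], and the small epsilon are assumptions here):
.. code-block:: python

    import numpy as np

    def roi_level(rois, min_level=2, max_level=5, refer_level=4, refer_scale=224):
        # rois: [N, 4] in (xmin, ymin, xmax, ymax); returns the target FPN level per roi.
        w = rois[:, 2] - rois[:, 0]
        h = rois[:, 3] - rois[:, 1]
        roi_scale = np.sqrt(w * h)
        # epsilon avoids log(0); the real kernel's conventions may differ
        level = np.floor(np.log2(roi_scale / refer_scale + 1e-6) + refer_level)
        return np.clip(level, min_level, max_level).astype(np.int32)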
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps (a rough sketch follows the list):
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1.
2. Concat multi-level RoIs and scores.
3. Sort scores and select post_nms_top_n scores.
4. Gather RoIs by the selected indices from scores.
5. Re-sort RoIs by their corresponding batch_id.
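The collection logic can be sketched with NumPy as follows (a toy
illustration with made-up helper names, assuming per-level arrays plus a
per-RoI batch-id array; not this op's implementation):
.. code-block:: python

    import numpy as np

    def collect_fpn_proposals_sketch(multi_rois, multi_scores, batch_ids,
                                     post_nms_top_n):
        # Illustrative sketch only.
        rois = np.concatenate(multi_rois)               # step 2: concat levels
        scores = np.concatenate(multi_scores).ravel()
        ids = np.concatenate(batch_ids)
        keep = np.argsort(-scores)[:post_nms_top_n]     # steps 3-4: top-N by score
        order = np.argsort(ids[keep], kind='stable')    # step 5: re-sort by batch id
        return rois[keep][order]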
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
|
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
| 967 | 998 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as follows
(a rough sketch of these rules is given after the list):
1. An anchor is assigned to a ground-truth box when it has the highest IoU
overlap with that ground-truth box.
2. An anchor is assigned to a ground-truth box when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. An anchor is assigned to the background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
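A minimal NumPy sketch of rules 1-4 (illustrative only; the helper name and
the tie-breaking details are assumptions, not this OP's kernel):
.. code-block:: python

    import numpy as np

    def label_anchors(iou, positive_overlap=0.5, negative_overlap=0.4):
        # iou: [num_anchors, num_gt] IoU matrix.
        # Returns 1 (positive), 0 (negative), -1 (ignored, rule 4) per anchor.
        labels = np.full(iou.shape[0], -1, dtype=np.int64)
        max_iou = iou.max(axis=1)
        labels[max_iou < negative_overlap] = 0        # rule 3
        labels[max_iou >= positive_overlap] = 1       # rule 2
        labels[iou.argmax(axis=0)] = 1                # rule 1: best anchor per gt
        return labels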
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0, and its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is a fake positive. If a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap
between anchors and ground-truth boxes, this layer assigns classification and
regression targets to each anchor; these targets are used to
train the RPN. The classification target is a binary class label (of being
an object or not). Following the Faster-RCNN paper, two kinds of anchors
are labeled positive: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap (0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. An anchor is labeled negative when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
foreground/background sigmoid score, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple (predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location are the predicted results of the RPN.
The target_label and target_bbox are the corresponding ground
truth. The predicted_location is a 2-D Tensor with shape
[F, 4], and the shape of target_bbox is the same as the shape of
the predicted_location, where F is the number of the foreground
anchors. The predicted_scores is a 2-D Tensor with shape
[F + B, 1], and the shape of target_label is the same as the shape
of the predicted_scores, where B is the number of the background
anchors; F and B depend on the input of this operator.
bbox_inside_weight represents whether the predicted location is a fake
foreground or not, and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance existed on the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as followed:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j + 1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j + 1) \\neq label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
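As a sanity check, the element-wise formula above can be reproduced with
NumPy (a minimal sketch of the math, not the OP's implementation; names
are illustrative):
.. code-block:: python
    import numpy as np
    def sigmoid_focal_loss_np(x, label, fg_num, gamma=2.0, alpha=0.25):
        # x: [N, C] logits; label: [N, 1], 0 for background, 1..C for foreground.
        p = 1.0 / (1.0 + np.exp(-x))                      # sigmoid(x)
        n, c = x.shape
        target = (label == np.arange(1, c + 1)[None, :])  # one-hot over classes 1..C
        pos = -alpha * (1.0 - p) ** gamma * np.log(p)
        neg = -(1.0 - alpha) * p ** gamma * np.log(1.0 - p)
        return np.where(target, pos, neg) / fg_num        # fg_num: positive-sample count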
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch; for example, for object detection the samples are anchor boxes and
:math:`N` is the total number of positive and negative samples in a mini-batch,
while for image classification the samples are images and :math:`N` is the number
of images in a mini-batch. :math:`C` is the number of classes
(**Notice: excluding background**). The data type of :attr:`x` is float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground;
# find the elements in label which are greater than or equal to 1, then
# count the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
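Step 2 can be illustrated with a plain greedy IoU suppression for a single
class (a simplified NumPy sketch of the idea only, not the multiclass_nms
kernel used by this OP; names are illustrative):
.. code-block:: python
    import numpy as np
    def greedy_nms(boxes, scores, nms_threshold=0.3, score_threshold=0.01):
        # boxes: [M, 4] as [xmin, ymin, xmax, ymax]; scores: [M] for one class.
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        order = np.argsort(-scores)
        order = order[scores[order] > score_threshold]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            rest = order[1:]
            xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
            yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
            xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
            yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
            iou = inter / (areas[i] + areas[rest] - inter)
            order = rest[iou <= nms_threshold]  # drop boxes overlapping the kept one
        return keep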
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding boxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values with the layout
[xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, and M is the number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in the first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results; if it is 0, the i-th image has no detected results.
Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected indices, which are of integer type.
The indices are absolute values across batches. No is the same number
as in Out. If the index is used to gather other attributes such as age,
one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The Decoding schema described below:
.. math::
ox = pw * pxv * tx + px
oy = ph * pyv * ty + py
ow = \exp(pwv * tw) * pw
oh = \exp(phv * th) * ph
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
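A minimal NumPy sketch of the encoding schema above (illustrative only;
corner-format boxes and the helper name are assumptions, not part of this API):
.. code-block:: python
    import numpy as np
    def encode_center_size(target, prior, prior_var):
        # target, prior: [M, 4] boxes as [xmin, ymin, xmax, ymax]; prior_var: [M, 4].
        tx, ty = (target[:, 0] + target[:, 2]) / 2, (target[:, 1] + target[:, 3]) / 2
        tw, th = target[:, 2] - target[:, 0], target[:, 3] - target[:, 1]
        px, py = (prior[:, 0] + prior[:, 2]) / 2, (prior[:, 1] + prior[:, 3]) / 2
        pw, ph = prior[:, 2] - prior[:, 0], prior[:, 3] - prior[:, 1]
        ox = (tx - px) / pw / prior_var[:, 0]
        oy = (ty - py) / ph / prior_var[:, 1]
        ow = np.log(np.abs(tw / pw)) / prior_var[:, 2]
        oh = np.log(np.abs(th / ph)) / prior_var[:, 3]
        return np.stack([ox, oy, ow, oh], axis=1)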
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is a Variable with shape [M, 4] which holds M groups of
variances, with data type float32 or float64. The second is a list of
4 elements shared by all boxes, with data type float32 or float64.
The third is None, in which case variances are not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
where x, y, w, h are stored in the third dimension.
x, y is the center coordinate of boxes, w, h are the
width and height, and x, y, w, h should be divided by
the input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, it contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
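A simplified NumPy sketch of the greedy matching idea (illustrative only,
not the C++ kernel; the helper name is an assumption):
.. code-block:: python
    import numpy as np
    def greedy_bipartite_match(dist):
        # dist: [K, M] distance matrix (larger value = better match).
        dist = dist.astype('float64')
        col_to_row = np.full(dist.shape[1], -1, dtype=np.int64)
        col_to_dist = np.zeros(dist.shape[1], dtype='float64')
        for _ in range(min(dist.shape)):
            r, c = np.unravel_index(np.argmax(dist), dist.shape)
            if dist[r, c] < 0:
                break
            col_to_row[c] = r
            col_to_dist[c] = dist[r, c]
            dist[r, :] = -1.0   # each row is matched at most once
            dist[:, c] = -1.0   # each column is matched at most once
        return col_to_row, col_to_dist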
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights for the predictions. The weights are used to specify which
predictions do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
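The two assignment steps above can be sketched in NumPy as follows
(illustrative only; the lod and neg_indices layouts are assumptions for
this sketch, not part of this API):
.. code-block:: python
    import numpy as np
    def target_assign_np(x, match_indices, lod, neg_indices=None, mismatch_value=0.0):
        # x: [M, P, K]; match_indices: [N, P] (-1 means no match);
        # lod: row offset of each instance in x (length N);
        # neg_indices: optional per-instance lists of negative prediction indices.
        n, p = match_indices.shape
        k = x.shape[2]
        out = np.full((n, p, k), mismatch_value, dtype=x.dtype)
        out_weight = np.zeros((n, p, 1), dtype='float32')
        for i in range(n):
            for j in range(p):
                idx = match_indices[i, j]
                if idx >= 0:                        # step 1: copy matched targets
                    out[i, j, :] = x[lod[i] + idx, j % p, :]
                    out_weight[i, j] = 1.0
            if neg_indices is not None:             # step 2: mark negatives
                for idx in neg_indices[i]:
                    out[i, idx, :] = mismatch_value
                    out_weight[i, idx] = 1.0
        return out, out_weight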
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th column entity is not matched to any row entity in
the i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
as `input`. N and P are the same as in `matched_indices`,
and K is the same as in `input`.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
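Step 5.3 amounts to the weighted combination below (a minimal NumPy sketch
under the assumption that the per-prior confidence and localization losses
are already computed and masked by their weights; the helper name is
hypothetical):
.. code-block:: python
    import numpy as np
    def combine_ssd_loss(conf_loss, loc_loss, target_loc_weight,
                         conf_loss_weight=1.0, loc_loss_weight=1.0, normalize=True):
        # conf_loss, loc_loss: [N, Np] per-prior losses; target_loc_weight: [N, Np].
        loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
        loss = loss.sum(axis=1, keepdims=True)        # [N, 1]
        if normalize:
            loss = loss / target_loc_weight.sum()     # number of matched priors
        return loss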
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', now only support mining \
type of `max_negative`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the (min_size, max_size) interval, and the boxes are generated in
sequence according to the aspect_ratios.
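The per-position box count N can be estimated as in the small sketch below,
which mirrors the usual SSD convention (aspect ratios are deduplicated,
optionally flipped, and one extra box is added per max_size); the helper is
hypothetical and not part of this API:
.. code-block:: python
    def num_priors_per_position(min_sizes, aspect_ratios, max_sizes=None, flip=False):
        ratios = [1.0]
        for r in aspect_ratios:
            if all(abs(r - e) > 1e-6 for e in ratios):
                ratios.append(r)
                if flip:
                    ratios.append(1.0 / r)
        num = len(ratios) * len(min_sizes)
        if max_sizes:
            num += len(max_sizes)
        return num
    # e.g. min_sizes=[100.], aspect_ratios=[1.], flip=True gives 1 box per
    # position, matching the (6, 9, 1, 4) shapes in the example below.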
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across
the width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must equal the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = \sum(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
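For instance, with densities=[4, 2, 1] and fixed_ratios=[1.] as in the
example below, each position gets 1*4^2 + 1*2^2 + 1*1^2 = 21 prior boxes,
which matches the [6, 9, 21, 4] output shape (a tiny sketch; the helper
name is hypothetical):
.. code-block:: python
    def num_density_priors(densities, fixed_ratios):
        return sum(len(fixed_ratios) * d ** 2 for d in densities)
    print(num_density_priors([4, 2, 1], [1.]))  # 21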
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64. The layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes. If this attribute is not set while :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step across
the width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, this layer generates prior boxes,
regression locations and classification confidences on multiple input feature
maps, then concatenates and outputs the results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_size` and `max_size` are None, the `min_size` and `max_size`
are calculated by `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
have the same meaning as above. C is the number of classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
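# min_sizes/max_sizes were not given: derive them from base_size,
# min_ratio and max_ratio, following the formula shown in the docstring.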
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'steps should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
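# A step of 0.0 lets prior_box compute the step automatically from the
# input and image sizes.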
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
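# Flatten each level's [H, W, num_priors, 4] boxes/variances to 2-D
# before concatenating across feature maps.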
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are
ordered by iterating first over aspect_ratios and then over anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
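# With this input, each position of conv1 produces
# len(anchor_sizes) * len(aspect_ratios) = 12 anchors, so anchor and var
# are expected to have shape [16, 16, 12, 4].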
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...]. (x1, y1) is the
top-left coordinate, (x2, y2) is the top-right coordinate,
(x3, y3) is the bottom-right coordinate, and (x4, y4) is the
bottom-left coordinate. The data type is the
same as `input`.
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
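# Out2InIdx and Out2InWeights are intermediate outputs of the op (used by
# its backward computation) and are not returned to the caller.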
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
This operator samples foreground and background boxes from the bounding boxes produced by GenerateProposalOp
and the ground-truth boxes, and computes the corresponding loss targets.
RpnRois are the output boxes of the RPN, produced by generate_proposal_op. These boxes
are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance overlaps a ground-truth box by more than fg_thresh, it is considered a foreground sample.
If an instance overlaps a ground-truth box by more than bg_thresh_lo and less than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in RoIs, the classification target (class label) and regression targets (box label) are assigned to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether each RoI contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Whether to use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): Whether bbox regression is class-agnostic, i.e. it only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): If set to True, boxes crossing the image boundary will be filtered out.
Returns:
tuple:
A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
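# The op samples foreground/background RoIs and writes the sampled boxes,
# labels, regression targets and loss weights to the outputs below.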
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. For each foreground RoI, the mask branch produces
a :math:`K \\times M^{2}` dimensional target, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation, assuming the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type, and its LoD level is 3.
Usually users do not need to understand LoD,
but they should return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
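# The op samples foreground RoIs and computes their binary mask targets;
# see the Returns section of the docstring for the output layout.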
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposals for Faster-RCNN**
This operation proposes RoIs according to each box's probability of
being a foreground object; the boxes are computed from the anchors.
The bbox_deltas and the objectness scores are the outputs of the RPN.
The final proposals can be used to train the detection network.
For generating proposals, this operation performs the following steps:
1. Transposes and reshapes scores and bbox_deltas to
(H*W*A, 1) and (H*W*A, 4).
2. Calculates box locations as proposal candidates.
3. Clips boxes to the image.
4. Removes predicted boxes with small area.
5. Applies NMS to get the final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor representing the anchors with a layout
of [H, W, A, 4]. H and W are the height and width of the feature map,
A is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Used in adaptive NMS; if the adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing the
number of RoIs of each image in the batch, where N is the number of images. For example, the values [4, 5] mean
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
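# The op performs the transpose/decode/clip/filter/NMS steps described
# above and writes the surviving proposals and scores to the outputs below.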
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
For each input box, The formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is 1., which means :attr:`nms_threshold` stays
unchanged during NMS. If :attr:`nms_eta` is set lower than 1. and
:attr:`nms_threshold` is set higher than 0.5, then every time a
bounding box is filtered out, :attr:`nms_threshold` is updated as
:attr:`nms_threshold` = :attr:`nms_threshold` * :attr:`nms_eta`,
until the actual value of :attr:`nms_threshold` is lower than or
equal to 0.5.
**Notice**: In some cases where the images are very small, it is possible
that there are no detections if :attr:`score_threshold` is applied at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS. The last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is therefore required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes whose scores are larger than score_threshold, if that threshold is
provided, and then keeps the top nms_top_k scores if nms_top_k
is larger than -1. It then prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes, using
adaptive-threshold NMS based on nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
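(intersection = 4, box1 area = 10, box2 area = 5, union = 10 + 5 - 4 = 11)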
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4], where
M is the number of bounding boxes and C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are in total M scores corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality-Aware NMS**
`Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
Firstly, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, this operator greedily selects a
subset of detection bounding boxes whose scores are larger than score_threshold,
if that threshold is provided, and then keeps the top nms_top_k scores
if nms_top_k is larger than -1. It then prunes away boxes that have a high
IoU overlap with already selected boxes, using adaptive-threshold NMS based on
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Currently only 1 class is supported. For each category
there are in total M scores corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator performs matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then keeps the top k candidates if
nms_top_k is larger than -1. The scores of the remaining candidates are then
decayed according to the Matrix NMS scheme.
After the NMS step, at most keep_top_k bboxes are kept
per image if keep_top_k is larger than -1.
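A rough NumPy sketch of the decay idea, assuming the SOLOv2 formulation
of Matrix NMS (the exact decay function of the underlying C++ op may
differ in detail; the helper name below is illustrative only):
.. code-block:: python
    import numpy as np
    def matrix_nms_decay(ious, scores, use_gaussian=False, sigma=2.0):
        # ious: [M, M] IoU matrix of boxes already sorted by descending score,
        # scores: [M] scores in the same order.
        n = len(scores)
        # Only pairs (i, j) with i < j (i scores higher than j) matter.
        iou_upper = np.triu(ious, k=1)
        # For each box, the largest IoU with any higher-scoring box.
        compensate = np.tile(iou_upper.max(axis=0), (n, 1)).T
        if use_gaussian:
            decay = np.exp(-sigma * (iou_upper ** 2 - compensate ** 2))
        else:
            decay = (1.0 - iou_upper) / (1.0 - compensate)
        # Each box is decayed by the strongest suppression it receives.
        return scores * decay.min(axis=0)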
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
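# --- Illustrative usage sketch (added for documentation purposes only) ---
# A minimal end-to-end sketch of running a matrix_nms graph like the docstring
# example above: one image, 81 candidate boxes and 10 classes of random scores.
# The helper name `_demo_matrix_nms` and the concrete shapes are assumptions
# made for illustration, not part of the library API.
def _demo_matrix_nms():
    import numpy as np
    import paddle.fluid as fluid

    boxes = fluid.data(name='demo_bboxes', shape=[None, 81, 4], dtype='float32')
    scores = fluid.data(name='demo_scores', shape=[None, 10, 81], dtype='float32')
    out = fluid.layers.matrix_nms(
        bboxes=boxes,
        scores=scores,
        background_label=0,
        score_threshold=0.5,
        post_threshold=0.1,
        nms_top_k=400,
        keep_top_k=200,
        normalized=False)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    feed = {
        'demo_bboxes': np.random.rand(1, 81, 4).astype('float32'),
        'demo_scores': np.random.rand(1, 10, 81).astype('float32'),
    }
    # Out is a LoDTensor whose rows are [label, confidence, xmin, ymin, xmax, ymax].
    nmsed, = exe.run(feed=feed, fetch_list=[out], return_numpy=False)
    return nmsed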
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of rois in current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
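# A small NumPy walkthrough (added for illustration) of the level-assignment
# formula documented above. The base-2 logarithm (as in the FPN paper) and the
# clipping of the result to [min_level, max_level] are assumptions about the
# op's behaviour; this is a conceptual check, not the op implementation.
def _demo_fpn_level_assignment():
    import numpy as np

    min_level, max_level = 2, 5
    refer_level, refer_scale = 4, 224

    # One RoI in [xmin, ymin, xmax, ymax] layout, roughly 112 x 112 pixels.
    roi = np.array([10., 20., 122., 132.])
    w, h = roi[2] - roi[0], roi[3] - roi[1]
    roi_scale = np.sqrt(w * h)                                   # ~112
    level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
    # log2(112 / 224) = -1, so the RoI is assigned to level 4 - 1 = 3.
    return int(np.clip(level, min_level, max_level))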
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
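# A conceptual NumPy analogue (added for illustration) of the five steps listed
# in the collect_fpn_proposals docstring: concatenate multi-level RoIs and
# scores, keep the post_nms_top_n highest-scoring RoIs, then re-sort the kept
# RoIs by batch id. LoD bookkeeping is ignored; this is not the op itself.
def _demo_collect_fpn_proposals_numpy():
    import numpy as np

    post_nms_top_n = 4
    # Two FPN levels, each with RoIs [xmin, ymin, xmax, ymax], scores, batch ids.
    rois = [np.random.rand(5, 4), np.random.rand(6, 4)]
    scores = [np.random.rand(5), np.random.rand(6)]
    batch_ids = [np.random.randint(0, 2, 5), np.random.randint(0, 2, 6)]

    all_rois = np.concatenate(rois)                       # step 2: concat levels
    all_scores = np.concatenate(scores)
    all_ids = np.concatenate(batch_ids)

    keep = np.argsort(-all_scores)[:post_nms_top_n]       # step 3: top-n scores
    kept_rois, kept_ids = all_rois[keep], all_ids[keep]   # step 4: gather RoIs

    order = np.argsort(kept_ids, kind='stable')           # step 5: re-sort by batch id
    return kept_rois[order]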
|
yolo_box
|
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as follows:
1. Anchors are assigned to ground-truth boxes when they have the highest IoU
overlap with a ground-truth box.
2. Anchors are assigned to ground-truth boxes when they have an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. Anchors are assigned to background when their IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive. If a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
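# A NumPy sketch (added for illustration) of the reshape + gather performed at
# the end of retinanet_target_assign: class logits of shape [N, M, C] are
# flattened to [N * M, C] and the rows indexed by ScoreIndex (the sampled
# positive and negative anchors) are taken out. The index values are made up.
def _demo_retinanet_gather_numpy():
    import numpy as np

    N, M, C = 2, 5, 3
    cls_logits = np.random.rand(N, M, C).astype('float32')
    score_index = np.array([0, 3, 7])                  # hypothetical sampled anchors
    flat_logits = cls_logits.reshape(-1, C)            # [N * M, C]
    predicted_cls_logits = flat_logits[score_index]    # [F + B, C]
    return predicted_cls_logits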
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap
between anchors and ground truth boxes, this layer assigns classification and
regression targets to each anchor; these target labels are used to
train the RPN. The classification target is a binary class label (of being
an object or not). Following the paper of Faster-RCNN, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. An anchor is assigned a negative label when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
        foreground/background sigmoid confidence, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
    is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
        example. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
        anchors; F and B depend on the input of this operator.
        Bbox_inside_weight represents whether the predicted location is a fake
        foreground or not, and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
        # Notice: size is set to be the number of target classes (excluding background)
        # because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
        # find the elements in label which are greater than or equal to 1, then
        # compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
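# A NumPy walkthrough (added for illustration) of the per-element focal loss
# formula given in the docstring above, evaluated directly instead of through
# the sigmoid_focal_loss op. The logits, label and fg_num values are made up.
def _demo_sigmoid_focal_loss_formula():
    import numpy as np

    gamma, alpha = 2.0, 0.25
    x = np.array([[0.3, -1.2, 2.0]])          # logits, shape [N=1, C=3]
    label = np.array([[2]])                   # class ids in [1, C]; 0 = background
    fg_num = 1.0                              # number of foreground samples

    sigma = 1.0 / (1.0 + np.exp(-x))
    # Positive entries are the columns j with (j + 1) == label[i, 0].
    pos_mask = (np.arange(1, x.shape[1] + 1)[None, :] == label)
    pos_term = -alpha * (1.0 - sigma) ** gamma * np.log(sigma)
    neg_term = -(1.0 - alpha) * sigma ** gamma * np.log(1.0 - sigma)
    return np.where(pos_mask, pos_term, neg_term) / fg_num   # shape [N, C]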
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
as Out. If the index is used to gather other attribute such as age,
one needs to reshape the input(N, M, 1) to (N * M, 1) as first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
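# A NumPy cross-check (added for illustration) of the IoU value shown in the
# docstring example above: with the default box_normalized=True (so width is
# xmax - xmin), IoU([0.5, 0.5, 2.0, 2.0], [1.0, 1.0, 2.5, 2.5]) = 1.0 / 3.5,
# i.e. the documented 0.2857143.
def _demo_iou_by_hand():
    import numpy as np

    a = np.array([0.5, 0.5, 2.0, 2.0])
    b = np.array([1.0, 1.0, 2.5, 2.5])
    iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))   # 1.0
    ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))   # 1.0
    inter = iw * ih
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)    # 2.25 + 2.25 - 1.0
    return inter / union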
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
            data type is float32 or float64. The second is a list consisting of
            4 elements shared by all boxes, with data type float32 or float64.
            The third is None, which means it is not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
        raise TypeError("Input variance of box_coder must be Variable or list")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
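# A NumPy sketch (added for illustration) of the documented encode_center_size
# formula: corner-format boxes are first converted to centre/size form (an
# assumption about the op's preprocessing), then the offsets are normalised by
# the prior box size and variance. The box coordinates here are made up.
def _demo_box_encode_formula():
    import numpy as np

    def to_center_size(box):
        x1, y1, x2, y2 = box
        return (x1 + x2) / 2., (y1 + y2) / 2., x2 - x1, y2 - y1

    prior = np.array([0., 0., 4., 4.])          # [xmin, ymin, xmax, ymax]
    target = np.array([1., 1., 3., 5.])
    pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2     # prior box variances

    px, py, pw, ph = to_center_size(prior)
    tx, ty, tw, th = to_center_size(target)

    ox = (tx - px) / pw / pxv
    oy = (ty - py) / ph / pyv
    ow = np.log(abs(tw / pw)) / pwv
    oh = np.log(abs(th / ph)) / phv
    return ox, oy, ow, oh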
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
    gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image.The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
# MASKED: yolo_box function (lines 1131-1219)
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In simple terms, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assume
match_indices[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d + LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
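The following toy matrix illustrates the matching semantics described above
(hypothetical values, not produced by the code above):
.. code-block:: text
dist_matrix = [[0.6, 0.2, 0.9],
[0.3, 0.8, 0.1]]
# Greedy matching picks the global maximum first: column 2 -> row 0 (0.9),
# then column 1 -> row 1 (0.8); column 0 is left unmatched.
matched_indices = [[-1, 1, 0]]
matched_distance = [[-1.0, 0.8, 0.9]]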
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which predictions
do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assuming that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assuming that the i-th instance in `neg_indices` is called `neg_indice`,
for the i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
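A toy illustration of these rules (hypothetical values, not tied to the example
below): with K = 1, P = 2, mismatch_value = 0, lod = [0, 2] and
match_indices[0] = [1, -1]:
.. code-block:: text
out[0][0][0:1] = X[lod[0] + 1][0 % 2][0:1]   # matched,    out_weight[0][0] = 1.
out[0][1][0:1] = [0]                         # mismatched, out_weight[0][1] = 0.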
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2-D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th column entity is not matched to any row entity in the
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3-D Tensor with shape [N, P, K] and the same data type
as `input`. N and P are the same as they are in `matched_indices`,
and K is the same as it is in `input`.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', since currently only the \
mining type `max_negative` is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
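# nn.flatten with axis=2 merges the leading two dims, e.g. [N, Np, C] -> [N * Np, C].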
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
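# For a 2-D gt_label the shape tuple below expands to (0, -1, 1): keep the
# leading dim, infer the next, and append a trailing singleton so gt_label
# becomes the 3-D [Ng, 1, 1] layout that target_assign expects.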
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the (min_size, max_size) interval, and the boxes are generated in
sequence according to the aspect_ratios.
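For example, in the code sample below, min_sizes=[100.] with the default
aspect_ratios=[1.] yields a single prior box per position, which is why the
printed output shape there is (6, 9, 1, 4).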
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across the
width or height of the input will be calculated automatically.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must equal the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density prior boxes and N_fixed_ratios is the number of fixed_ratios.
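For example, with densities=[4, 2, 1] and fixed_ratios=[1.] (as in the code
sample below), each position produces 1 * (4^2 + 2^2 + 1^2) = 21 boxes, which
matches the [6, 9, 21, 4] shape printed in that example.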
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 of float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should be a list or tuple of the same
length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step across the
width or height of the input will be calculated automatically.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_sizes` and `max_sizes` are None, `min_sizes` and `max_sizes`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default:1,
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
have the same meaning as above. C is the number of classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
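# Worked example (values computed from the formula above, matching the
# 6-input docstring Examples 1): base_size=300, min_ratio=20, max_ratio=90
# give step=17, min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and
# max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0], one pair per input.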
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
by looping over aspect_ratios first and then anchor_sizes.
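For example, with anchor_sizes=[64, 128, 256, 512] and
aspect_ratios=[0.5, 1.0, 2.0] (as in the example below), each position
produces 4 * 3 = 12 anchors.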
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set name; None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes output by GenerateProposalOp and the ground truth, this operator
samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposals. These boxes
are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a ground-truth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a ground-truth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in the RoIs, the classification (class label) and regression targets (box label) are assigned to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether each box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicating whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): Whether bbox regression is class agnostic, i.e. only foreground and background boxes are distinguished.
is_cascade_rcnn(bool): If True, bboxes crossing the image boundary are filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentations, assuming the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for segms in batch_segms:
gt_masks = []
for segm in segms:
gt_segm = []
for polys in segm:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type; its LoD level is 3.
Usually users do not need to understand LoD,
but they should return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's probability of being
a foreground object; the boxes are computed from the anchors. bbox_deltas
and scores are the output of the RPN. The final proposals
can be used to train the detection network.
For generating proposals, this operation performs the following steps:
1. Transpose and reshape scores and bbox_deltas to sizes of
(H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get the final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
num_anchors is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Used in adaptive NMS; if the adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When True, a 1-D Tensor with shape [N, ] is also returned, containing the
number of RoIs of each image in the batch, where N is the number of images. For example, the values [4, 5]
mean the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
False by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
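
# A minimal NumPy sketch of steps 3 and 4 described in the generate_proposals
# docstring above (clipping decoded proposals to the image and removing boxes
# whose sides are smaller than min_size). It is an illustration only, not the
# C++ kernel behind the op; the pixel (+1) width/height convention is an
# assumption made for this sketch.
import numpy as np


def _clip_and_filter_proposals_sketch(proposals, im_height, im_width, min_size):
    """proposals: float array of shape [M, 4] in (xmin, ymin, xmax, ymax)."""
    proposals = proposals.copy()
    proposals[:, 0] = np.clip(proposals[:, 0], 0, im_width - 1)   # xmin
    proposals[:, 1] = np.clip(proposals[:, 1], 0, im_height - 1)  # ymin
    proposals[:, 2] = np.clip(proposals[:, 2], 0, im_width - 1)   # xmax
    proposals[:, 3] = np.clip(proposals[:, 3], 0, im_height - 1)  # ymax
    ws = proposals[:, 2] - proposals[:, 0] + 1
    hs = proposals[:, 3] - proposals[:, 1] + 1
    keep = np.where((ws >= min_size) & (hs >= min_size))[0]
    return proposals[keep], keep
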
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
    For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
        im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
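
# A minimal NumPy sketch of the clipping formula in the box_clip docstring
# above, assuming boxes of shape [..., 4] in (xmin, ymin, xmax, ymax) and an
# im_info row of (height, width, scale). It mirrors the documented math only,
# not the C++ kernel behind the op.
import numpy as np


def _box_clip_reference_sketch(boxes, im_info_row):
    height, width, scale = im_info_row
    im_h = np.round(height / scale)
    im_w = np.round(width / scale)
    clipped = boxes.copy()
    clipped[..., 0] = np.clip(boxes[..., 0], 0, im_w - 1)  # xmin
    clipped[..., 1] = np.clip(boxes[..., 1], 0, im_h - 1)  # ymin
    clipped[..., 2] = np.clip(boxes[..., 2], 0, im_w - 1)  # xmax
    clipped[..., 3] = np.clip(boxes[..., 3], 0, im_h - 1)  # ymax
    return clipped
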
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
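
# A minimal NumPy sketch of the per-FPN-level candidate selection described in
# step 1 of the retinanet_detection_output docstring: keep at most nms_top_k
# predictions per level whose best class score exceeds score_threshold. Box
# decoding and the final multi-class NMS are omitted; the helper name and
# shapes are illustrative assumptions, not part of the op.
import numpy as np


def _select_level_candidates_sketch(level_scores, score_threshold, nms_top_k):
    """level_scores: float array of shape [Mi, C] for one image at one level."""
    best_scores = level_scores.max(axis=1)
    keep = np.where(best_scores > score_threshold)[0]
    if nms_top_k > -1 and keep.size > nms_top_k:
        order = np.argsort(-best_scores[keep])[:nms_top_k]
        keep = keep[order]
    return keep
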
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes with confidence scores larger than score_threshold, if that
    threshold is provided, and then keeps the top nms_top_k scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have a high IoU
    (intersection over union) overlap with already selected boxes, using adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k boxes in total are kept
    per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes.The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptively adjusting nms_threshold in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
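
# A minimal NumPy sketch of the greedy IoU suppression that multiclass_nms
# applies per class, shown here for a single class with normalized boxes.
# Adaptive nms_eta and keep_top_k handling are omitted; this is an
# illustration, not the kernel implementation.
import numpy as np


def _greedy_nms_single_class_sketch(boxes, scores, nms_threshold):
    """boxes: [M, 4] in (xmin, ymin, xmax, ymax); scores: [M]."""
    order = np.argsort(-scores)
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        rest = order[1:]
        xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_rest = ((boxes[rest, 2] - boxes[rest, 0]) *
                     (boxes[rest, 3] - boxes[rest, 1]))
        iou = inter / (area_i + area_rest - inter + 1e-10)
        order = rest[iou <= nms_threshold]
    return keep
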
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
    **Locality-Aware NMS**
    `Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
    suppression (LANMS) on boxes and scores.
    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes with confidence scores larger than score_threshold,
    if that threshold is provided, and then keeps the top nms_top_k scores
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
    IoU overlap with already selected boxes, using adaptive threshold NMS based on the parameters
    nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k boxes in total are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptively adjusting nms_threshold in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
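
# A minimal NumPy sketch of the locality-aware merging step that precedes the
# standard NMS in locality_aware_nms: consecutive boxes whose IoU exceeds a
# threshold are merged by score-weighted averaging of their coordinates, and
# the merged score is the sum of the two scores (the LANMS/EAST formulation).
# This is a sketch under those assumptions, not the op's kernel.
import numpy as np


def _iou_xyxy_sketch(a, b):
    xx1, yy1 = max(a[0], b[0]), max(a[1], b[1])
    xx2, yy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-10)


def _locality_aware_merge_sketch(boxes, scores, iou_threshold):
    """boxes: [M, 4] assumed to be in row-scan order; scores: [M]."""
    merged_boxes, merged_scores = [], []
    for box, score in zip(boxes, scores):
        if merged_boxes and _iou_xyxy_sketch(merged_boxes[-1], box) > iou_threshold:
            prev_box, prev_score = merged_boxes[-1], merged_scores[-1]
            total = prev_score + score
            merged_boxes[-1] = (prev_box * prev_score + box * score) / total
            merged_scores[-1] = total
        else:
            merged_boxes.append(np.asarray(box, dtype=np.float64))
            merged_scores.append(float(score))
    return np.array(merged_boxes), np.array(merged_scores)
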
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
    This operator does matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are kept if
    nms_top_k is larger than -1. The scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k boxes in total are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
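
# A minimal NumPy sketch of the score decay at the heart of Matrix NMS for one
# class: each candidate's score is decayed by a factor derived from its IoU
# with all higher-scoring candidates (linear or Gaussian decay). Boxes whose
# decayed score falls below post_threshold would then be dropped. Inputs are
# assumed to be sorted by descending score; illustrative only, not the kernel.
import numpy as np


def _matrix_nms_decay_sketch(ious, scores, use_gaussian=False, sigma=2.0):
    """ious: [M, M] pairwise IoU of score-sorted boxes; scores: [M], descending."""
    n = scores.shape[0]
    iou_upper = np.triu(ious, k=1)           # IoU with strictly higher-scoring boxes
    iou_max = iou_upper.max(axis=0)          # best overlap each box has with a better box
    compensate = np.tile(iou_max, (n, 1)).T  # decay already suffered by the better box
    if use_gaussian:
        decay = np.exp(-(iou_upper ** 2 - compensate ** 2) / sigma)
    else:
        decay = (1.0 - iou_upper) / (1.0 - compensate)
    return scores * decay.min(axis=0)
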
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of each proposal, the referring scale and the
    referring level. Besides, to restore the order of proposals, we return an
    array which indicates the original index of the RoIs in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
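
# A minimal NumPy sketch of the level-assignment formula in the
# distribute_fpn_proposals docstring: each RoI is mapped to an FPN level from
# the square root of its box area relative to refer_scale/refer_level, then
# clamped to [min_level, max_level]. The base-2 logarithm and the small
# epsilon are assumptions of this sketch (following the FPN paper), not a
# statement about the exact kernel.
import numpy as np


def _target_fpn_levels_sketch(rois, min_level, max_level, refer_level,
                              refer_scale):
    """rois: [N, 4] in (xmin, ymin, xmax, ymax)."""
    ws = rois[:, 2] - rois[:, 0]
    hs = rois[:, 3] - rois[:, 1]
    roi_scale = np.sqrt(ws * hs)
    levels = np.floor(np.log2(roi_scale / refer_scale + 1e-8) + refer_level)
    return np.clip(levels, min_level, max_level).astype(np.int32)
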
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
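
# A minimal NumPy sketch of steps 2-4 in the collect_fpn_proposals docstring
# for a single image: concatenate RoIs and scores from all levels, pick the
# post_nms_top_n highest-scoring RoIs, and gather them. The per-batch
# re-sorting by batch id (step 5) is omitted; names and shapes here are
# illustrative assumptions.
import numpy as np


def _collect_topk_rois_sketch(level_rois, level_scores, post_nms_top_n):
    """level_rois: list of [Ni, 4] arrays; level_scores: list of [Ni, 1] arrays."""
    rois = np.concatenate(level_rois, axis=0)
    scores = np.concatenate(level_scores, axis=0).reshape(-1)
    k = min(post_nms_top_n, scores.shape[0])
    keep = np.argsort(-scores)[:k]
    return rois[keep], scores[keep]
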
|
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
        TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
        raise TypeError("Attr conf_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
| 1,131 | 1,219 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
    1. Anchors are assigned to ground-truth boxes when they have the highest IoU
    overlap with a ground-truth box.
    2. Anchors are assigned to ground-truth boxes when they have an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.
    3. Anchors are assigned to background when their IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.
    4. Anchors which do not meet the above conditions do not participate in
    the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
    ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive. If a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
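
# A minimal NumPy sketch of the anchor labelling rule described in the
# retinanet_target_assign docstring above: an anchor is positive if it has the
# highest IoU for some ground-truth box or an IoU of at least positive_overlap
# with any ground-truth box, negative if its best IoU is below
# negative_overlap, and ignored otherwise. Labels: 1 positive, 0 negative,
# -1 ignored. Crowd handling and sampling are omitted; illustrative only.
import numpy as np


def _label_anchors_sketch(iou, positive_overlap, negative_overlap):
    """iou: [num_anchors, num_gt] pairwise IoU between anchors and gt boxes."""
    labels = np.full(iou.shape[0], -1, dtype=np.int32)
    best_iou_per_anchor = iou.max(axis=1)
    labels[best_iou_per_anchor < negative_overlap] = 0
    labels[best_iou_per_anchor >= positive_overlap] = 1
    # The anchor with the highest IoU for each gt box is always positive.
    labels[iou.argmax(axis=0)] = 1
    return labels
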
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
    Given the Intersection-over-Union (IoU) overlap between anchors and
    ground-truth boxes, this layer assigns classification and regression
    targets to each anchor; these target labels are used to train the RPN. The
    classification target is a binary class label (of being an object or not).
    Following the Faster-RCNN paper, positive labels are assigned to two kinds
    of anchors: (i) the anchor/anchors with the highest IoU overlap with a
    ground-truth box, or (ii) an anchor that has an IoU overlap higher than
    rpn_positive_overlap(0.7) with any ground-truth box. Note that a single
    ground-truth box may assign positive labels to multiple anchors. An anchor
    is labelled negative when its IoU ratio is lower than rpn_negative_overlap
    (0.3) for all ground-truth boxes. Anchors that are neither positive nor
    negative do not contribute to the training objective. The regression
    targets are the encoded ground-truth boxes associated with
    the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence predictions. N is the batch size, 1 is the
            foreground and background sigmoid, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
        anchors; F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance existed on the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as followed:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding backgorund)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater or equal than 1, then
# computed the numbers of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection outputs is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
as Out. If the index is used to gather other attribute such as age,
one needs to reshape the input(N, M, 1) to (N * M, 1) as first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx * + px) - tw / 2
oy = (ph * pyv * ty * + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
data type is float32 or float64. The second is list consist of
4 elements shared by all boxes and data type is float32 or float64.
Other is None and not involved in calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
gt_box (Variable): groud truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image.The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolov_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
also can find the matched row for each column. And this operator only
calculate matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
A simple description, this algorithm matched the best (maximum distance)
row entity to the column entity and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If the column entity is not matched
any row entity, set -1 in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
This operator can be, for given the target bounding boxes or labels,
to assign classification and regression targets to each prediction as well as
weights to prediction. The weights is used to specify which prediction would
not contribute to training loss.
For each instance, the output `out` and`out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[j][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is 2D Tenosr<int32> with shape [N, P], If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and same data type
with `input`, N and P is the same as they are in `matched_indices`,
K is the same as it in input of X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', now only support mining \
type of `max_negative`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
Each position of the input produce N prior boxes, N is determined by
the count of min_sizes, max_sizes and aspect_ratios, The size of the
box is in range(min_size, max_size) interval, which is generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
step(list|tuple): Prior boxes step across width and height, If
step[0] equals to 0.0 or step[1] equals to 0.0, the prior boxes step across
height or weight of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layput is [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_priors is the total box count of each position of input
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
algorithm. Each position of the input produce N prior boxes, N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes center at grid points around each input position is generated by
this operator, and the grid points is determined by densities and
the count of density prior box is determined by fixed_sizes and fixed_ratios.
Obviously, the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
N\_density_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 of float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
step(list|tuple): Prior boxes step across width and height, If
step[0] equals 0.0 or step[1] equals 0.0, the density prior boxes step across
height or weight of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be euqal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Base on SSD ((Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression location and classification confidence on multiple input feature
maps, then output the concatenate results. The details of this algorithm,
please refer the section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is input image size. When len(inputs) > 2
and `min_size` and `max_size` are None, the `min_size` and `max_size`
are calculated by `baze_size`, 'min_ratio' and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <=2`,
min_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <=2`,
max_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default:1,
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
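        # Worked example (illustration only): with base_size=300, min_ratio=20,
        # max_ratio=90 and 6 inputs, step = floor((90 - 20) / 4) = 17, so the
        # loop visits ratios 20, 37, 54, 71 and 88, and after prepending the
        # extra entries min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
        # and max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].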
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
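    # For each input feature map: generate its prior boxes, then predict the
    # per-box location offsets (4 values per box) and per-box class confidences
    # with separate conv2d heads, flattening both to [N, -1] for concatenation.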
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
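        # After the reshapes, mbox_locs_concat has shape [N, num_priors, 4] and
        # mbox_confs_concat has shape [N, num_priors, num_classes], where
        # num_priors is the total number of prior boxes over all feature maps.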
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
    by iterating over aspect_ratios first and then anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
            For instance, an anchor size of 64 means the area of this anchor
            equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
            Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple '
                         'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
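    # The number of anchors per spatial position is
    # len(anchor_sizes) * len(aspect_ratios); e.g. 4 sizes and 3 ratios yield
    # 12 anchors per position, giving an output layout of [H, W, 12, 4].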
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
    ROI perspective transform op applies a perspective transform to map each RoI into a
    rectangular region. A perspective transform is a projective transformation that maps an
    arbitrary quadrilateral onto a rectangle.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
    Given the bounding boxes produced by GenerateProposalOp and the ground-truth,
    this operator samples foreground and background boxes and computes the loss targets.
    RpnRois are the output boxes of the RPN, processed by generate_proposal_op. These boxes
    are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
    If an instance has a ground-truth overlap greater than fg_thresh, it is considered a foreground sample.
    If an instance has a ground-truth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
    it is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
    For each box in RoIs, we assign classification targets (class labels) and regression targets (box labels).
    Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
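    For example, with the defaults batch_size_per_im=256 and fg_fraction=0.25, at most
    256 * 0.25 = 64 foreground RoIs are sampled per image.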
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): If True, the bbox regression is class-agnostic, i.e. it only distinguishes foreground and background boxes.
        is_cascade_rcnn(bool): If True, bboxes that cross the image boundary are filtered out.
Returns:
tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
    Given RoIs and their corresponding labels, this operator
    samples foreground RoIs. The mask branch also produces
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.
    Please note the data format of the ground-truth segmentation; assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
        for segms in batch_segms:
            gt_masks = []
            for segm in segms:
                gt_segm = []
                for polys in segm:
                    gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
        gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
            float32 data type; its LoD level is 3.
            Usually users do not need to understand LoD; they only need to
            return the correct data format from the reader.
            LoD[0] represents the number of ground-truth objects of
            each instance. LoD[1] represents the number of segmentations of each
            object. LoD[2] represents the number of polygons of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
            R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of the original image.
        labels_int32 (Variable): A 2-D LoDTensor with shape [R, 1] and int32 data
            type. R is the same as in `rois`. Each element represents
            a class label of an RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
    **Generate proposals for Faster-RCNN**
    This operation proposes RoIs according to each box's probability of being
    a foreground object; the boxes are computed from the anchors. bbox_deltas
    and scores are the outputs of the RPN. The final proposals
    can be used to train the detection network.
    For generating proposals, this operation performs the following steps:
    1. Transpose and reshape scores and bbox_deltas to sizes of
       (H*W*A, 1) and (H*W*A, 4)
    2. Calculate box locations as proposal candidates.
    3. Clip boxes to the image.
    4. Remove predicted boxes with small area.
    5. Apply NMS to get final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
        anchors(Variable): A 4-D Tensor representing the anchors with a layout
            of [H, W, A, 4]. H and W are the height and width of the feature map,
            and A is the number of anchors at each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): If True, also return a 1-D Tensor with shape [N, ] that contains
            the number of RoIs of each image in the batch, where N is the number of images. For
            example, the values [4, 5] mean that the first image has 4 RoIs and the second image
            has 5 RoIs. It is only used in RCNN models. 'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
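    # RpnRoisLod holds the number of kept RoIs per image; it is only returned to
    # the caller when return_rois_num is True.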
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
    Clip the boxes to the image size given by im_info.
    For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
            im_w = round(width / scale)
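    As a minimal worked example (illustrative only, not produced by this API): clipping the box
    [-5.0, 10.0, 120.0, 130.0] with im_h = im_w = 100 yields [0.0, 10.0, 99.0, 99.0].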
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
    **Notice**: In some cases where the image sizes are very small, it is possible
    that there are no detections if :attr:`score_threshold` is applied at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
    This operator performs multi-class non-maximum suppression (NMS) on
    boxes and scores.
    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes whose scores are larger than score_threshold, if this threshold is
    provided, then selects the largest nms_top_k confidence scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have a high IoU
    (intersection over union) overlap with already selected boxes, using adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes are kept
    per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
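    In the example above, the IoU of 4/11 follows from an intersection area of 4 x 1 = 4 and a
    union area of 10 + 5 - 4 = 11.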
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8, 16, 24, 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4],
                           where M is the number of bounding boxes and C is the
                           class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Local Aware NMS**
`Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
suppression (LANMS) on boxes and scores.
    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes whose scores are larger than score_threshold,
    if this threshold is provided, then selects the largest nms_top_k confidence scores
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
    IoU overlap with already selected boxes, using adaptive threshold NMS based on the
    parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
    This operator performs matrix non-maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then keeps the top nms_top_k candidates if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, it is necessary to distribute all proposals into different FPN
    levels according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of the proposals, we return an
    array which indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
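    For example, with refer_scale=224 and refer_level=4, and taking the logarithm to be base-2
    (the common FPN convention), an RoI of size 112 x 112 has roi_scale = 112 and is assigned to
    level floor(log2(112 / 224) + 4) = 3.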
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
        multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
            and data type float32 or float64. The list length is
            max_level-min_level+1. Each element holds the proposals of one FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it.
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
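# Editorial sketch (not part of the fluid API): the five steps documented above, restated
# in NumPy with the LoD bookkeeping replaced by an explicit per-RoI batch_id array.
# Illustration only; the real op consumes LoDTensors and runs in C++.
def _sketch_collect_fpn_proposals(multi_rois, multi_scores, multi_batch_ids, post_nms_top_n):
    import numpy as np
    rois = np.concatenate(multi_rois, axis=0)              # step 2: concat multi-level RoIs
    scores = np.concatenate(multi_scores, axis=0).reshape(-1)
    batch_ids = np.concatenate(multi_batch_ids, axis=0).reshape(-1)
    top = np.argsort(-scores)[:post_nms_top_n]             # step 3: sort scores, keep top-N
    rois, batch_ids = rois[top], batch_ids[top]            # step 4: gather RoIs by indices
    order = np.argsort(batch_ids, kind='stable')           # step 5: re-sort by batch_id
    return rois[order]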
|
anchor_generator
|
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are
ordered by looping over aspect_ratios first and then over anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it. None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The criteria for selecting positive and negative samples are as follows:
1. An anchor is assigned to a ground-truth box when it has the highest IoU
overlap with that ground-truth box.
2. An anchor is assigned to a ground-truth box when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. An anchor is assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target location for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0, and its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd and it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector consisting of the height and width
of the network input along with the factor scaling the original image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative samples, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
indicates whether a positive sample is a fake positive. If a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
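# Editorial sketch (not part of the fluid API): the anchor-labeling principles listed in the
# docstring above, restated in NumPy (1 = positive, 0 = negative, -1 = ignored). Sampling,
# crowd handling and regression targets are omitted; the real assignment runs inside the op.
def _sketch_assign_anchor_labels(iou, positive_overlap=0.5, negative_overlap=0.4):
    import numpy as np
    iou = np.asarray(iou, dtype='float32')      # [num_anchors, num_gt] IoU matrix
    labels = np.full(iou.shape[0], -1, dtype='int64')
    max_iou = iou.max(axis=1)
    labels[max_iou < negative_overlap] = 0      # principle 3: background anchors
    labels[max_iou >= positive_overlap] = 1     # principle 2: high-overlap positives
    labels[iou.argmax(axis=0)] = 1              # principle 1: best anchor for each gt box
    return labels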
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and
ground truth boxes, this layer assigns classification and regression
targets to each anchor; these target labels are used to train the RPN.
The classification target is a binary class label (of being an object or
not). Following the Faster-RCNN paper, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. An anchor is assigned a negative label when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding boxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
foreground and background sigmoid score, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth box is a crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
and the 3 values are the image height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
example. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location are the predicted results of the RPN.
The target_label and target_bbox are the corresponding ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is the same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is the same as the shape
of the predicted_scores, B is the number of the background
anchors, and F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted location is a fake
foreground (fake_fg) or not and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance existed on the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as followed:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j + 1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j + 1) \\neq label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
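# Editorial sketch (not part of the fluid API): a NumPy restatement of the elementwise
# formula in the docstring above, handy for checking the math by hand. The op itself uses
# a numerically stabilised CPU/CUDA kernel.
def _sketch_sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
    import numpy as np
    x = np.asarray(x, dtype='float64')          # [N, C] logits, background excluded from C
    label = np.asarray(label).reshape(-1, 1)    # [N, 1]; 0 = background, 1..C = foreground
    p = 1.0 / (1.0 + np.exp(-x))                # sigma(x)
    classes = np.arange(1, x.shape[1] + 1)      # column j corresponds to class id j + 1
    is_target = (label == classes)              # broadcasts to [N, C]
    pos = -alpha * (1.0 - p) ** gamma * np.log(p)
    neg = -(1.0 - alpha) * p ** gamma * np.log(1.0 - p)
    return np.where(is_target, pos, neg) / float(fg_num)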
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding boxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is an absolute index across batches. No is the same number
as in Out. If the index is used to gather other attributes such as age,
one needs to reshape the input of shape (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
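# Editorial sketch (not part of the fluid API): how to split the `Out` LoDTensor described
# in the Returns section above into per-image results, given a fetched numpy array and the
# level-0 LoD offsets (image i owns rows lod[i]:lod[i + 1]). Names are illustrative only.
def _sketch_split_detection_output(out_array, lod_offsets):
    return [out_array[lod_offsets[i]:lod_offsets[i + 1]]
            for i in range(len(lod_offsets) - 1)]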
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}. The data type is float32 or float64.
y (Variable): ${y_comment}. The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}. The data type is the same as x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
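# Editorial sketch (not part of the fluid API): pairwise IoU for normalized boxes
# (box_normalized=True, so width = xmax - xmin with no +1 term). With the inputs from the
# docstring example above it reproduces [[0.2857143], [0.]]. Illustration only.
def _sketch_pairwise_iou(x, y):
    import numpy as np
    x = np.asarray(x, dtype='float32')[:, None, :]   # [N, 1, 4]
    y = np.asarray(y, dtype='float32')[None, :, :]   # [1, M, 4]
    inter_w = np.clip(np.minimum(x[..., 2], y[..., 2]) - np.maximum(x[..., 0], y[..., 0]), 0, None)
    inter_h = np.clip(np.minimum(x[..., 3], y[..., 3]) - np.maximum(x[..., 1], y[..., 1]), 0, None)
    inter = inter_w * inter_h
    area_x = (x[..., 2] - x[..., 0]) * (x[..., 3] - x[..., 1])
    area_y = (y[..., 2] - y[..., 0]) * (y[..., 3] - y[..., 1])
    return inter / (area_x + area_y - inter)         # [N, M] IoU matrix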
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is a Variable with shape [M, 4] which holds M groups of
variances and whose data type is float32 or float64. The second is a list
consisting of 4 elements shared by all boxes, with data type float32 or float64.
The third is None, in which case variances are not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it.
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
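# Editorial sketch (not part of the fluid API): the encode_center_size schema from the
# docstring above, evaluated with NumPy for one target box per prior box and the 4-element
# list form of prior_box_var. Normalized boxes are assumed (width = xmax - xmin).
def _sketch_encode_center_size(prior_box, target_box, variance=(0.1, 0.1, 0.2, 0.2)):
    import numpy as np
    p = np.asarray(prior_box, dtype='float32')   # [M, 4] priors, [xmin, ymin, xmax, ymax]
    t = np.asarray(target_box, dtype='float32')  # [M, 4] targets, aligned with the priors
    pw, ph = p[:, 2] - p[:, 0], p[:, 3] - p[:, 1]
    px, py = p[:, 0] + 0.5 * pw, p[:, 1] + 0.5 * ph
    tw, th = t[:, 2] - t[:, 0], t[:, 3] - t[:, 1]
    tx, ty = t[:, 0] + 0.5 * tw, t[:, 1] + 0.5 * th
    ox = (tx - px) / pw / variance[0]
    oy = (ty - py) / ph / variance[1]
    ow = np.log(np.abs(tw / pw)) / variance[2]
    oh = np.log(np.abs(th / ph)) / variance[3]
    return np.stack([ox, oy, ow, oh], axis=1)    # [M, 4] encoded deltas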
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x, y is the center coordinate of the boxes, w, h are the
width and height, and x, y, w, h should be divided by
the input image width and height to scale them to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
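# Editorial sketch (not part of the fluid API): preparing gt_box in the layout the docstring
# above describes, i.e. pixel corner boxes converted to [x, y, w, h] with the center and size
# normalized by the image width/height. Names are illustrative only.
def _sketch_normalize_gt_box(corner_boxes, img_w, img_h):
    import numpy as np
    b = np.asarray(corner_boxes, dtype='float32')    # [B, 4] as [xmin, ymin, xmax, ymax] pixels
    w = (b[:, 2] - b[:, 0]) / img_w
    h = (b[:, 3] - b[:, 1]) / img_h
    x = (b[:, 0] + b[:, 2]) / 2.0 / img_w
    y = (b[:, 1] + b[:, 3]) / 2.0 / img_h
    return np.stack([x, y, w, h], axis=1)            # [B, 4] in the gt_box layout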
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo_box must be list or tuple
TypeError: Attr class_num of yolo_box must be an integer
TypeError: Attr conf_thresh of yolo_box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
Put simply, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it.
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
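# Editorial sketch (not part of the fluid API): the greedy column-to-row matching described
# above, for a single instance: repeatedly take the globally largest remaining distance,
# match that (row, column) pair, and exclude both from further matching. Unmatched columns
# keep -1. Illustration only; the OP also supports LoD batches and 'per_prediction' matching.
def _sketch_greedy_bipartite_match(dist):
    import numpy as np
    dist = np.asarray(dist, dtype='float32').copy()  # [K rows, M columns]
    k, m = dist.shape
    col_to_row = np.full(m, -1, dtype='int64')
    for _ in range(min(k, m)):
        r, c = np.unravel_index(np.argmax(dist), dist.shape)
        if dist[r, c] <= 0:                          # nothing useful left to match
            break
        col_to_row[c] = r
        dist[r, :] = -1.0                            # row r is taken
        dist[:, c] = -1.0                            # column c is matched
    return col_to_row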
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which predictions do
not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] >= 0,
    out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
    out_weight[i][j] = 1.
Otherwise,
    out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
    out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assuming the i-th instance in `neg_indices` is called `neg_indice`,
then for the i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
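A minimal NumPy sketch of the two steps above for a single instance (an illustration
under simplified shape assumptions; `lod` is folded into a single instance and this is
not the OP's internal layout):
    .. code-block:: python
        import numpy as np

        def assign_targets(X, match_indices, mismatch_value, neg_indice=None):
            # X: [M, P, K] candidate targets of one instance; match_indices: [P].
            M, P, K = X.shape
            out = np.full((P, K), mismatch_value, dtype=X.dtype)
            out_weight = np.zeros((P, 1), dtype=np.float32)
            for j, idx in enumerate(match_indices):
                if idx >= 0:                    # step 1: copy the matched target row
                    out[j] = X[idx, j % P]
                    out_weight[j] = 1.0
            if neg_indice is not None:          # step 2: negatives keep mismatch_value
                for j in neg_indice:            # but still get a non-zero weight
                    out[j] = mismatch_value
                    out_weight[j] = 1.0
            return out, out_weight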
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
    is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
    the j-th column entity is not matched to any row entity in the
    i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
    as `input`. N and P are the same as they are in `matched_indices`,
    and K is the same as in `input`.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
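Schematically, the objective built in steps 5.1-5.3 can be summarized as follows
(a simplified sketch; the normalizer applies only when `normalize=True`):
    .. code-block:: text
        loss[n][p] = conf_loss_weight * conf_loss[n][p] + loc_loss_weight * loc_loss[n][p]
        loss[n]    = sum_p(loss[n][p]) / sum(target_loc_weight)    # when normalize=True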
Args:
location (Variable): The location predictions are a 3D Tensor with
    shape [N, Np, 4]. N is the batch size, Np is the total number of
    predictions for each instance, and 4 is the number of coordinate values
    with layout [xmin, ymin, xmax, ymax]. The data type is float32 or
    float64.
confidence (Variable): The confidence predictions are a 3D Tensor
    with shape [N, Np, C]. N and Np are the same as they are in
    `location`, and C is the class number. The data type is float32 or
    float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
    LoDTensor with shape [Ng, 4], where Ng is the total number of ground-truth
    bboxes of the mini-batch input. The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
    with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
    the mini-batch input, and the second dimension holds the class label.
    The data type is float32 or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
    or 'max_negative'; currently only 'max_negative' is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
    with shape [N * Np, 1], where N and Np are the same as they are in
    `location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example'; currently only the \
    `max_negative` mining type is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by bipartite matching algorithm.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the counts of min_sizes, max_sizes and aspect_ratios. The size of each
box is within the (min_size, max_size) interval, and boxes are generated in
sequence according to the aspect_ratios.
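As a rough sanity check (an assumption based on the usual SSD prior-box convention,
not a guaranteed reproduction of the OP's internal count), the per-position box count
can be estimated like this:
    .. code-block:: python
        min_sizes = [100.]
        aspect_ratios = [1.]
        max_sizes = []
        flip = True

        # 1.0 plus the given ratios (and their reciprocals when flip=True), deduplicated.
        expanded = {1.0}
        for ar in aspect_ratios:
            expanded.add(ar)
            if flip:
                expanded.add(1.0 / ar)

        num_priors = len(expanded) * len(min_sizes) + len(max_sizes)
        print(num_priors)  # 1, matching the (6, 9, 1, 4) shape in the example below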
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
    steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across the
    width or height of the input will be automatically calculated.
    Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
    in the order of [min, max, aspect_ratios], which is consistent with
    Caffe. Please note that this order affects the weight order of the
    following convolution layer, but does not affect the final
    detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
    4-D tensor, the layout is [H, W, num_priors, 4].
    H is the height of input, W is the width of input, and
    num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the counts of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must be equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
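For the configuration used in the code sample below, this formula gives:
    .. code-block:: python
        densities = [4, 2, 1]
        fixed_ratios = [1.]

        num_priors = sum(len(fixed_ratios) * d ** 2 for d in densities)
        print(num_priors)  # 21, matching the [6, 9, 21, 4] box shape in the example below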
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
    the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
    prior boxes, this attribute should be a list or tuple of the same
    length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
    prior boxes, if this attribute is not set and :attr:`densities`
    and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
    to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
    steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step across the
    width or height of the input will be automatically calculated.
    Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
    the format of all Variables is 4-D Tensor, layout is NCHW.
    Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
    the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
    and `min_sizes` and `max_sizes` are None, the `min_sizes` and `max_sizes`
    are calculated from `base_size`, `min_ratio` and `max_ratio`. The
    formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
    min_sizes must be set up, and the length of min_sizes
    should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
    max_sizes must be set up, and the length of max_sizes
    should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default: 0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
    in the order of [min, max, aspect_ratios], which is consistent with
    Caffe. Please note that this order affects the weight order of the
    following convolution layer, but does not affect the final
    detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
    The layout is [N, num_priors, C], where ``N`` and ``num_priors``
    have the same meaning as above. C is the number of classes.
    Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
# MASKED: anchor_generator function (lines 2402-2504)
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes output by GenerateProposalOp and the groundtruth, this operator
samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN, processed by generate_proposal_op. These boxes
are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in the RoIs, the classification (class label) and regression (box label) targets are assigned to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
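A simplified NumPy sketch of the sampling rule described above (illustrative only; the
actual OP additionally handles crowd boxes, LoD batches and the cascade / class-agnostic
options, and the thresholds here are treated inclusively for simplicity):
    .. code-block:: python
        import numpy as np

        def sample_rois(max_overlaps, batch_size_per_im=256, fg_fraction=0.25,
                        fg_thresh=0.25, bg_thresh_hi=0.5, bg_thresh_lo=0.0):
            # max_overlaps[i] is the largest IoU between RoI i and any groundtruth box.
            fg_inds = np.where(max_overlaps >= fg_thresh)[0]
            bg_inds = np.where((max_overlaps >= bg_thresh_lo) &
                               (max_overlaps < bg_thresh_hi))[0]
            fg_num = min(int(batch_size_per_im * fg_fraction), len(fg_inds))
            bg_num = min(batch_size_per_im - fg_num, len(bg_inds))
            fg_inds = np.random.choice(fg_inds, size=fg_num, replace=False)
            bg_inds = np.random.choice(bg_inds, size=bg_num, replace=False)
            return fg_inds, bg_inds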
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Whether to use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): If True, bbox regression is class-agnostic and only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): If True, bboxes crossing the image's boundary are filtered out.
Returns:
tuple:
A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and corresponding labels, this operator
samples foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation, assuming the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
    float32 data type; its LoD level is 3.
    Usually users do not need to understand LoD;
    they should return the correct data format in the reader.
    LoD[0] represents the number of ground-truth objects of
    each instance. LoD[1] represents the segmentation count of each
    object. LoD[2] represents the number of polygons of each segmentation.
    S is the total number of polygon coordinate points. Each element is
    an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
    R is the total number of RoIs; each element is a bounding
    box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposals for Faster-RCNN**
This operation proposes RoIs according to the probability of each box
being a foreground object; the boxes are computed from the anchors.
bbox_deltas and the objectness scores are the outputs of the RPN. The final
proposals can be used to train the detection net.
For generating proposals, this operation performs the following steps:
1. Transpose and reshape scores and bbox_deltas to shapes
(H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get the final proposals as output.
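A shape-level sketch of step 1, assuming plain NumPy arrays (the real OP fuses all
five steps in C++):
    .. code-block:: python
        import numpy as np

        N, A, H, W = 1, 4, 5, 5
        scores = np.random.rand(N, A, H, W).astype("float32")
        bbox_deltas = np.random.rand(N, 4 * A, H, W).astype("float32")

        # Step 1: move the anchor axis last, then flatten each image to H*W*A rows.
        scores_2d = scores.transpose(0, 2, 3, 1).reshape(N, H * W * A, 1)
        deltas_2d = bbox_deltas.transpose(0, 2, 3, 1).reshape(N, H * W * A, 4)
        print(scores_2d.shape, deltas_2d.shape)  # (1, 100, 1) (1, 100, 4)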
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor representing the anchors with a layout
    of [H, W, A, 4]. H and W are the height and width of the feature map,
    and A is the box count of each position. Each anchor is
    in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(int): Total number of bboxes to be kept per
    image before NMS. 6000 by default.
post_nms_top_n(int): Total number of bboxes to be kept per
    image after NMS. 1000 by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned,
    containing the RoI count of each image in the batch, where N is the number of images.
    For example, the values [4, 5] mean that the first image has 4 RoIs and the second image has 5 RoIs.
    It is only used in the RCNN model. False by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes to the size given by im_info.
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
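A small NumPy sketch of the clipping formula above (illustrative; assumes boxes laid out
as [..., 4] in [xmin, ymin, xmax, ymax] order):
    .. code-block:: python
        import numpy as np

        def clip_boxes(boxes, im_h, im_w):
            # boxes: [..., 4] with [xmin, ymin, xmax, ymax] in image coordinates.
            out = boxes.copy()
            out[..., 0::2] = np.clip(out[..., 0::2], 0, im_w - 1)  # xmin, xmax
            out[..., 1::2] = np.clip(out[..., 1::2], 0, im_h - 1)  # ymin, ymax
            return out

        boxes = np.array([[-5.0, 10.0, 700.0, 420.0]])
        print(clip_boxes(boxes, im_h=400, im_w=600))  # [[  0.  10. 599. 399.]]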
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
    to :ref:`api_guide_Name`. Usually the name does not need to be
    set. None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
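A rough per-level sketch of the thresholding and top-k part of step 1 above, assuming
NumPy inputs (anchor decoding and the merge/NMS of step 2 are omitted):
    .. code-block:: python
        import numpy as np

        def top_candidates(level_scores, score_threshold=0.05, nms_top_k=1000):
            # level_scores: [Mi, C] class scores of one FPN level for one image.
            max_scores = level_scores.max(axis=1)
            keep = np.where(max_scores > score_threshold)[0]
            order = keep[np.argsort(-max_scores[keep])]
            return order[:nms_top_k]  # indices of the boxes kept for this level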
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a low confidence score before NMS; the default value is 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is 1.0, which means :attr:`nms_threshold` stays unchanged
during NMS. If :attr:`nms_eta` is set lower than 1.0 and
:attr:`nms_threshold` is set higher than 0.5, then every time a bounding
box is filtered out, :attr:`nms_threshold` is updated as
:attr:`nms_threshold` = :attr:`nms_threshold` * :attr:`nms_eta`, until
its actual value becomes lower than or equal to 0.5 (see the sketch
after the notice below).
**Notice**: In some cases where the image sizes are very small, it is possible
that no detections survive if :attr:`score_threshold` is applied at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to come from the highest FPN level.
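The adjustment of :attr:`nms_threshold` by :attr:`nms_eta` described above can be
sketched as follows (illustration only, not part of the OP):

.. code-block:: python

    nms_threshold, nms_eta = 0.8, 0.9
    for _ in range(10):                  # pretend 10 boxes get suppressed in turn
        if nms_eta < 1.0 and nms_threshold > 0.5:
            nms_threshold *= nms_eta     # shrink until it drops to 0.5 or below
    print(round(nms_threshold, 4))       # ~0.4724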
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects the subset of detection bounding
boxes whose scores are larger than score_threshold (if this threshold is
provided), and then keeps the nms_top_k boxes with the largest confidence
scores if nms_top_k is larger than -1. It then prunes away boxes that have a high
IOU (intersection over union) overlap with already selected boxes, using adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
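The IoU value used in the example above can be reproduced with a few lines of
plain Python (illustration only):

.. code-block:: python

    def iou(a, b):
        # a, b: (xmin, ymin, xmax, ymax)
        iw = max(0., min(a[2], b[2]) - max(a[0], b[0]))
        ih = max(0., min(a[3], b[3]) - max(a[1], b[1]))
        inter = iw * ih
        union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
        return inter / union

    print(iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0)))  # 4/11 = 0.3636...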
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8, 16, 24, 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D LoDTensor with shape [M, C, 4], where
M is the number of bounding boxes and C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4]. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS; it only takes effect when the value is less than 1.0. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If no boxes are detected for any
image, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality-Aware NMS**
`Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ (LANMS) performs locality-aware non maximum
suppression on boxes and scores.
Firstly, this operator merges boxes and scores according to their IOU
(intersection over union). In the NMS step, this operator then greedily selects the
subset of detection bounding boxes whose scores are larger than score_threshold
(if this threshold is provided), and keeps the nms_top_k boxes with the largest
confidence scores if nms_top_k is larger than -1. It then prunes away boxes that have a high
IOU overlap with already selected boxes by adaptive threshold NMS based on the parameters
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
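The initial merging step can be sketched as the score-weighted average of
overlapping boxes described in the referenced paper; the snippet below
(plain NumPy, 4-coordinate boxes) is an illustration only, not the OP's
implementation:

.. code-block:: python

    import numpy as np

    def weighted_merge(box_a, score_a, box_b, score_b):
        # coordinates are averaged, weighted by the scores; scores accumulate
        merged = (score_a * np.asarray(box_a) + score_b * np.asarray(box_b)) \
                 / (score_a + score_b)
        return merged, score_a + score_b

    box, score = weighted_merge((2., 3., 7., 5.), 0.7, (3., 4., 8., 5.), 0.3)
    print(box, score)  # [2.3 3.3 7.3 5. ] 1.0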
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is the number of bounding
boxes. Currently only 1 class is supported. For each category
there are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS; it only takes effect when the value is less than 1.0. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If no boxes are detected for any
image, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes whose scores are higher
than score_threshold (if provided), then keeps the top k candidates if
nms_top_k is larger than -1. The scores of the remaining candidates are then
decayed according to the Matrix NMS scheme.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
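The decay step for one box can be sketched as below, loosely following the
commonly cited Matrix NMS formulation; treat it as an illustration rather than
the exact behaviour of this OP:

.. code-block:: python

    import numpy as np

    def decay_factor(iou_ij, iou_max_j, use_gaussian=False, gaussian_sigma=2.0):
        # iou_ij: IoU of the current box i with a higher-scoring box j
        # iou_max_j: largest IoU that box j has with any box scoring higher than j
        if use_gaussian:
            return np.exp(-(iou_ij ** 2 - iou_max_j ** 2) / gaussian_sigma)
        return (1.0 - iou_ij) / (1.0 - iou_max_j)

    # the decayed score of box i is its original score multiplied by the
    # minimum decay factor over all higher-scoring boxes j
    print(decay_factor(0.8, 0.2), decay_factor(0.8, 0.2, use_gaussian=True))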
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
the number of bounding boxes. For each category there
are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute indices across batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of the rois in the current proposals.
To compute the FPN level for each roi, the formula is given as follows:
.. math::
    roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\\\
    level &= floor(\log_2(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
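A minimal NumPy sketch of this level assignment for a single roi (illustration
only; the OP additionally handles LoD and works on whole batches):

.. code-block:: python

    import numpy as np

    def target_level(roi, refer_level=4, refer_scale=224, min_level=2, max_level=5):
        # roi: (xmin, ymin, xmax, ymax)
        roi_scale = np.sqrt((roi[2] - roi[0]) * (roi[3] - roi[1]))
        level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
        return int(np.clip(level, min_level, max_level))

    print(target_level((0., 0., 112., 112.)))  # 112 is half of 224, so level 3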
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually this parameter does not need to be
set, and it is None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
and data type float32 or float64. The length of the list is
max_level - min_level + 1, one LoDTensor of proposals per FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually this parameter does not need to be
set, and it is None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps (sketched in code after the list):
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
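A rough NumPy sketch of these steps (illustration only; the real OP works on
LoDTensors, and the hypothetical `multi_batch_ids` below stands in for the
batch information carried by the LoD):

.. code-block:: python

    import numpy as np

    def collect(multi_rois, multi_scores, multi_batch_ids, post_nms_top_n):
        rois = np.concatenate(multi_rois)               # step 2: concat levels
        scores = np.concatenate(multi_scores)
        batch_ids = np.concatenate(multi_batch_ids)
        keep = np.argsort(-scores)[:post_nms_top_n]     # step 3: top-N scores
        rois, batch_ids = rois[keep], batch_ids[keep]   # step 4: gather RoIs
        order = np.argsort(batch_ids, kind='stable')    # step 5: re-sort by image
        return rois[order]

    rois = [np.random.rand(8, 4), np.random.rand(4, 4)]
    scores = [np.random.rand(8), np.random.rand(4)]
    batch_ids = [np.zeros(8, dtype=int), np.zeros(4, dtype=int)]
    print(collect(rois, scores, batch_ids, post_nms_top_n=6).shape)  # (6, 4)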
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually this parameter does not need to be
set, and it is None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
by looping over aspect_ratios first and then over anchor_sizes.
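For example, the anchor count per position, and the coordinates of a single
anchor, can be sketched as below; the centering convention `(x + offset) * stride`
is an assumption for illustration, and the OP's exact generation order may differ:

.. code-block:: python

    anchor_sizes = [64., 128., 256., 512.]
    aspect_ratios = [0.5, 1.0, 2.0]   # height / width
    stride = [16., 16.]
    offset = 0.5

    print(len(anchor_sizes) * len(aspect_ratios))  # N = 12 anchors per position

    def make_anchor(x, y, size, ratio):
        # area == size ** 2 and h / w == ratio
        w, h = size / ratio ** 0.5, size * ratio ** 0.5
        cx, cy = (x + offset) * stride[0], (y + offset) * stride[1]
        return (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2)

    print(make_anchor(0, 0, 64., 1.0))  # (-24.0, -24.0, 40.0, 40.0)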
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, an anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually this parameter does not need to be set,
and it is None by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input, and
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple '
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as followed:
1. Anchors are assigned to ground-truth boxes when it has the highest IoU
overlap with a ground-truth box.
2. Anchors are assigned to ground-truth boxes when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. Anchors are assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries is set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples. :math:`F` is the number of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is fake positive, if a positive
sample is false positive, the corresponding entries in
:attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
This layer can be, for given the Intersection-over-Union (IoU) overlap
between anchors and ground truth boxes, to assign classification and
regression targets to each each anchor, these target labels are used for
train RPN. The classification targets is a binary class label (of being
an object or not). Following the paper of Faster-RCNN, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. A non-positive anchor is when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
frontground and background sigmoid, M is number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates groud-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
anchors, the F and B is depends on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance existed on the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as followed:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding backgorund)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater or equal than 1, then
# computed the numbers of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
            as Out. If the index is used to gather other attributes such as age,
            one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
            N is the batch size and M is the number of boxes.
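        The LoD offsets described above can be used to read the per-image
        results out of the fetched output. A minimal NumPy sketch (the
        `out_np` values and `lod` offsets below are made-up placeholders,
        not produced by this API):
        .. code-block:: python
            import numpy as np
            out_np = np.random.rand(5, 6).astype('float32')  # No = 5 rows of [label, confidence, xmin, ymin, xmax, ymax]
            lod = [0, 2, 5]                                   # N + 1 offsets for a batch of N = 2 images
            for i in range(len(lod) - 1):
                dets_i = out_np[lod[i]:lod[i + 1]]            # LoD[i + 1] - LoD[i] detections of image i
                print('image', i, 'has', len(dets_i), 'detections')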
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
                'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
                'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
                'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
                'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
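    The values above can also be reproduced with plain Python (a minimal
    sketch assuming box_normalized=True, i.e. width = xmax - xmin):
    .. code-block:: python
        def iou(a, b):
            iw = max(0., min(a[2], b[2]) - max(a[0], b[0]))
            ih = max(0., min(a[3], b[3]) - max(a[1], b[1]))
            inter = iw * ih
            union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
            return inter / union if union > 0 else 0.
        print(iou([0.5, 0.5, 2.0, 2.0], [1.0, 1.0, 2.5, 2.5]))  # ~0.2857143
        print(iou([0., 0., 1.0, 1.0], [1.0, 1.0, 2.5, 2.5]))    # 0.0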
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
    The Encoding schema is described below:
    .. math::
        ox = (tx - px) / pw / pxv
        oy = (ty - py) / ph / pyv
        ow = \log(|tw / pw|) / pwv
        oh = \log(|th / ph|) / phv
    The Decoding schema is described below:
    .. math::
        ox = (pw * pxv * tx + px) - tw / 2
        oy = (ph * pyv * ty + py) - th / 2
        ow = \exp(pwv * tw) * pw + tw / 2
        oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
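    As a purely illustrative NumPy sketch of the encoding schema above (the
    box coordinates and variances below are made-up values, not part of this
    API):
    .. code-block:: python
        import numpy as np
        p = np.array([0., 0., 4., 4.])            # prior box [xmin, ymin, xmax, ymax]
        t = np.array([1., 1., 3., 5.])            # target box [xmin, ymin, xmax, ymax]
        pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2   # prior box variances
        px, py = (p[0] + p[2]) / 2., (p[1] + p[3]) / 2.
        pw, ph = p[2] - p[0], p[3] - p[1]
        tx, ty = (t[0] + t[2]) / 2., (t[1] + t[3]) / 2.
        tw, th = t[2] - t[0], t[3] - t[1]
        ox = (tx - px) / pw / pxv
        oy = (ty - py) / ph / pyv
        ow = np.log(np.abs(tw / pw)) / pwv
        oh = np.log(np.abs(th / ph)) / phv
        print(ox, oy, ow, oh)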
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
data type is float32 or float64. The second is list consist of
4 elements shared by all boxes and data type is float32 or float64.
Other is None and not involved in calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image.The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
        TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
        input_states: (tuple|None) If not None, it contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
            from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
    distance matrix. For an input 2-D matrix, the bipartite matching algorithm
    can find the matched column for each row (matched means the largest
    distance), and can also find the matched row for each column. This operator
    only calculates matched indices from column to row. For each instance,
    the number of matched indices is the column number of the input distance
    matrix. **The OP only supports CPU**.
    There are two outputs, matched indices and distance.
    In short, this algorithm matches the best (maximum distance) row entity to
    each column entity, and the matched indices are not duplicated in each row
    of ColToRowMatchIndices. If a column entity is not matched to any row
    entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider using :code:`ssd_loss` instead.
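    A simplified NumPy sketch of the greedy idea described above (illustrative
    only, not the exact kernel; the `iou` matrix below is made up):
    .. code-block:: python
        import numpy as np
        def greedy_match(dist):
            K, M = dist.shape
            col_to_row = -np.ones(M, dtype='int32')
            d = dist.copy()
            for _ in range(min(K, M)):
                r, c = np.unravel_index(np.argmax(d), d.shape)
                if d[r, c] < 0:      # every remaining pair is already used
                    break
                col_to_row[c] = r
                d[r, :] = -1.        # each row entity is matched at most once
                d[:, c] = -1.        # each column entity is matched at most once
            return col_to_row
        iou = np.array([[0.7, 0.2, 0.1],
                        [0.3, 0.6, 0.4]])
        print(greedy_match(iou))     # -> [0, 1, -1]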
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights for the predictions. The weights are used to specify which
    predictions do not contribute to the training loss.
    For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
        If id = match_indices[i][j] >= 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
        out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
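    A literal NumPy transcription of step 1 above, with made-up shapes and a
    made-up `lod` (here an index of -1 is treated as the unmatched marker, in
    line with the `matched_indices` description below):
    .. code-block:: python
        import numpy as np
        M, P, K, N = 4, 3, 2, 2
        X = np.arange(M * P * K, dtype='float32').reshape(M, P, K)
        lod = [0, 2, 4]                      # row offsets of each instance in X
        match_indices = np.array([[1, -1, 0],
                                  [0, 1, -1]])
        mismatch_value = 0.
        out = np.empty((N, P, K), dtype='float32')
        out_weight = np.zeros((N, P, 1), dtype='float32')
        for i in range(N):
            for j in range(P):
                idx = match_indices[i][j]
                if idx >= 0:
                    out[i][j][0:K] = X[lod[i] + idx][j % P][0:K]
                    out_weight[i][j] = 1.
                else:
                    out[i][j][0:K] = mismatch_value
                    out_weight[i][j] = 0.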
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
            is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and same data type
with `input`, N and P is the same as they are in `matched_indices`,
K is the same as it in input of X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
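    A schematic NumPy sketch of step 5 above (the per-prior losses, weights and
    normalizer below are made-up placeholders, not what this layer computes
    internally):
    .. code-block:: python
        import numpy as np
        N, Np = 2, 8
        conf_loss = np.random.rand(N, Np).astype('float32')  # 5.1 confidence loss per prior
        loc_loss = np.random.rand(N, Np).astype('float32')   # 5.2 localization loss per prior
        conf_loss_weight, loc_loss_weight = 1.0, 1.0
        loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss  # 5.3
        loss = loss.sum(axis=1, keepdims=True)                # one value per image
        normalizer = 6.0                                      # e.g. the total localization target weight
        loss = loss / normalizer                              # applied when normalize=True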
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
        with shape [N, 1], where N is the same as it is in
        `location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', now only support mining \
type of `max_negative`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box lies in the (min_size, max_size) interval, and the boxes are generated
    in sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        steps(list|tuple): Prior box steps across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior box steps across
            the height or width of the input will be automatically calculated.
            Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4].
            H is the height of input, W is the width of input,
            num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
    algorithm. Each position of the input produces N prior boxes, where N is
    determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator; the grid points are determined by densities, and the count
    of density prior boxes is determined by fixed_sizes and fixed_ratios.
    Obviously, the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
        N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
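    For instance, with the settings used in the examples below, the
    per-position prior count works out as follows (a plain-Python sketch of
    the formula above):
    .. code-block:: python
        densities = [4, 2, 1]
        fixed_sizes = [32.0, 64.0, 128.0]
        fixed_ratios = [1.]
        num_priors = sum(len(fixed_ratios) * d ** 2 for d in densities)
        print(num_priors)   # 21, matching the [6, 9, 21, 4] output shape below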
Parameters:
        input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
        image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64. The layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
        steps(list|tuple): Prior box steps across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box steps across
            the height or width of the input will be automatically calculated.
            Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
    Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior
    boxes, regression locations and classification confidences on multiple input
    feature maps, then output the concatenated results. For details of this
    algorithm, please refer to section 2.2 of the SSD paper `SSD: Single Shot
    MultiBox Detector <https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
        base_size(int): the base_size is the input image size. When len(inputs) > 2
            and `min_size` and `max_size` are None, the `min_size` and `max_size`
            are calculated from `base_size`, `min_ratio` and `max_ratio`. The
            formula is as follows (a worked instance is sketched after Examples 2 below):
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <=2`,
min_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <=2`,
max_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default:1,
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
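    The min_sizes/max_sizes derived from base_size, min_ratio and max_ratio in
    Examples 1 can be reproduced with the formula from the base_size description
    above (a plain-Python sketch for base_size=300, min_ratio=20, max_ratio=90
    and six input layers):
    .. code-block:: python
        import math
        import six
        base_size, min_ratio, max_ratio, num_layer = 300, 20, 90, 6
        min_sizes, max_sizes = [], []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes  # [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
        max_sizes = [base_size * .20] + max_sizes  # [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]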
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
by looping over aspect_ratios first and then anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this name;
it is None by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
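As a rough, NumPy-only illustration of the geometry described above (the op's
exact centering and rounding conventions may differ slightly; ``anchors_at_cell``
is a hypothetical helper, not part of this API):
.. code-block:: python
import numpy as np
anchor_sizes = [64., 128.]
aspect_ratios = [0.5, 1.0, 2.0]      # height / width
stride = [16., 16.]
offset = 0.5
def anchors_at_cell(ix, iy):
    cx = (ix + offset) * stride[0]
    cy = (iy + offset) * stride[1]
    boxes = []
    for r in aspect_ratios:          # ratios loop ...
        for s in anchor_sizes:       # ... then sizes
            w, h = s / np.sqrt(r), s * np.sqrt(r)   # keeps area == s**2
            boxes.append([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])
    return np.array(boxes)           # shape: [len(sizes) * len(ratios), 4]
print(anchors_at_cell(0, 0).shape)   # (6, 4)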
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
The ROI perspective transform op applies a perspective transform to map each roi into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes produced by the GenerateProposal op and the groundtruth,
this operator samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN processed by the generate_proposal op. These boxes
are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
An instance whose overlap with the groundtruth is greater than fg_thresh is considered a foreground sample.
An instance whose overlap with the groundtruth is greater than bg_thresh_lo and lower than bg_thresh_hi
is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in RoIs, we assign the classification (class label) and regression targets (box label) to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): If True, bbox regression is class-agnostic and only distinguishes foreground from background boxes.
is_cascade_rcnn(bool): If True, boxes crossing the image's boundary are filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
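A simplified, single-image NumPy sketch of the sampling rule described above
(illustration only; the real op also handles crowds, class labels and the bbox
regression targets and weights):
.. code-block:: python
import numpy as np
np.random.seed(0)
max_overlaps = np.random.rand(200)   # max IoU of each RoI with any gt box
batch_size_per_im, fg_fraction = 256, 0.25
fg_thresh, bg_thresh_hi, bg_thresh_lo = 0.25, 0.5, 0.0
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
bg_inds = np.where((max_overlaps < bg_thresh_hi) &
                   (max_overlaps >= bg_thresh_lo))[0]
fg_num = min(int(batch_size_per_im * fg_fraction), len(fg_inds))
fg_inds = np.random.choice(fg_inds, size=fg_num, replace=False)
bg_num = min(batch_size_per_im - fg_num, len(bg_inds))
bg_inds = np.random.choice(bg_inds, size=bg_num, replace=False)
keep = np.concatenate([fg_inds, bg_inds])   # indices of the sampled RoIs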
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation; assume the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
float32 data type; its LoD level is 3.
Usually users do not need to understand LoD;
they should return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's
probability of being a foreground object;
the boxes are computed from the anchors. bbox_deltas and scores
are the output of the RPN. The final proposals
can be used to train the detection net.
For generating proposals, this operation performs the following steps:
1. Transpose and resize scores and bbox_deltas to shapes
(H*W*A, 1) and (H*W*A, 4).
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
num_anchors is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): If True, also return a 1-D Tensor with shape [N, ] that contains the RoI
count of each image in the batch, where N is the number of images. For example, the values [4, 5] mean
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
False by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
For each input box, The formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
name; it is None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1, 3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
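A minimal NumPy sketch of the clipping formula above (``clip_boxes`` is a
hypothetical helper for illustration, not part of this API):
.. code-block:: python
import numpy as np
def clip_boxes(boxes, im_info):
    # boxes: [..., 4] as (xmin, ymin, xmax, ymax); im_info: (height, width, scale)
    im_h = np.round(im_info[0] / im_info[2])
    im_w = np.round(im_info[1] / im_info[2])
    out = boxes.copy()
    out[..., 0::2] = np.clip(boxes[..., 0::2], 0, im_w - 1)   # xmin, xmax
    out[..., 1::2] = np.clip(boxes[..., 1::2], 0, im_h - 1)   # ymin, ymax
    return out
boxes = np.array([[-5., 10., 900., 50.]])
print(clip_boxes(boxes, np.array([600., 800., 1.])))   # [[0. 10. 799. 50.]]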
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it's possible
that there is no detection if :attr:`score_threshold` is used at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to come from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes whose scores are larger than score_threshold (if the threshold is provided),
and then selects the nms_top_k boxes with the largest confidence scores if nms_top_k
is larger than -1. It then prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes by adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k of the total bboxes are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes.The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
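A quick, pure-Python check of the ``iou = 4/11`` figure in the text example above
(simple corner-coordinate IoU without any +1 pixel convention; ``iou`` below is a
hypothetical helper, not part of this API):
.. code-block:: python
def iou(a, b):
    ix = max(0., min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0., min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union
print(iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0)))   # 0.3636... == 4/11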
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality-Aware NMS**
`Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
First, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, this operator greedily selects a
subset of detection bounding boxes whose scores are larger than score_threshold
(if the threshold is provided), and then selects the nms_top_k boxes with the largest
confidence scores if nms_top_k is larger than -1. It then prunes away boxes that have a high
IoU overlap with already selected boxes by adaptive threshold NMS based on the parameters
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k of the total bboxes are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
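A rough sketch of the score-weighted merge step that locality-aware NMS applies
before standard NMS, following the referenced paper (the op's exact merge rule
may differ; ``weighted_merge`` is a hypothetical helper, not part of this API):
.. code-block:: python
def weighted_merge(box_a, score_a, box_b, score_b):
    s = score_a + score_b
    merged = [(score_a * a + score_b * b) / s for a, b in zip(box_a, box_b)]
    return merged, s
box, score = weighted_merge([2., 3., 7., 5.], 0.7, [3., 4., 8., 5.], 0.3)
print(box, score)   # coordinates pulled toward the higher-scoring box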
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator performs matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then selects the top nms_top_k candidates if
nms_top_k is larger than -1. The scores of the remaining candidates are then
decayed according to the Matrix NMS scheme.
After the NMS step, at most keep_top_k of the total bboxes are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
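A simplified single-class sketch of the score decay described above, following the
linear variant of the Matrix NMS paper (the op additionally applies the Gaussian
variant, thresholds, per-class handling and top-k selection; ``matrix_nms_scores``
is a hypothetical helper, not part of this API):
.. code-block:: python
import numpy as np
def matrix_nms_scores(scores, ious):
    # scores: [M], sorted descending; ious: [M, M] pairwise IoU matrix
    decayed = scores.copy()
    for j in range(1, len(scores)):
        decays = []
        for i in range(j):
            # how much box i was itself suppressed by higher-scoring boxes
            comp = max((ious[k, i] for k in range(i)), default=0.0)
            decays.append((1.0 - ious[i, j]) / (1.0 - comp))
        decayed[j] = scores[j] * min(decays)
    return decayed
scores = np.array([0.9, 0.8, 0.3])
ious = np.array([[1.0, 0.6, 0.1],
                 [0.6, 1.0, 0.2],
                 [0.1, 0.2, 1.0]])
print(matrix_nms_scores(scores, ious))   # approximately [0.9, 0.32, 0.27]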
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
name; it is None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
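A small NumPy sketch of the level-assignment formula above, using a base-2
logarithm as in the FPN paper and clamping to ``[min_level, max_level]``
(``fpn_level`` is a hypothetical helper, not part of this API):
.. code-block:: python
import numpy as np
def fpn_level(roi, min_level=2, max_level=5, refer_level=4, refer_scale=224):
    w, h = roi[2] - roi[0], roi[3] - roi[1]
    roi_scale = np.sqrt(w * h)
    level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
    return int(np.clip(level, min_level, max_level))
print(fpn_level([0., 0., 224., 224.]))   # 4 (matches the referring scale)
print(fpn_level([0., 0., 112., 112.]))   # 3 (half the scale, one level lower)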
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
name; it is None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
name; it is None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
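A single-image NumPy sketch of the collection steps listed above (illustration
only; the real op works on LoDTensors and re-sorts the result by batch id):
.. code-block:: python
import numpy as np
np.random.seed(0)
level_rois = [np.random.rand(n, 4) for n in (6, 4, 3, 2)]      # 4 FPN levels
level_scores = [np.random.rand(n, 1) for n in (6, 4, 3, 2)]
post_nms_top_n = 5
rois = np.concatenate(level_rois)                 # step 2: concat levels
scores = np.concatenate(level_scores)[:, 0]
keep = np.argsort(-scores)[:post_nms_top_n]       # step 3: top scores
fpn_rois = rois[keep]                             # step 4: gather selected RoIs
print(fpn_rois.shape)                             # (5, 4)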
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
|
generate_mask_labels
|
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
This operator can be, for given the RoIs and corresponding labels,
to sample foreground RoIs. This mask branch also has
a :math: `K \times M^{2}` dimensional output targets for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. This mask targets are used to compute loss of mask branch.
Please note, the data format of groud-truth segmentation, assumed the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth boxes, and each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type, and its LoD level is 3.
Usually users do not need to understand LoD;
they should return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The criteria for selecting positive and negative samples are as follows:
1. An anchor is assigned to a ground-truth box when it has the highest IoU
overlap with that ground-truth box.
2. An anchor is assigned to a ground-truth box when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. An anchor is assigned to the background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
RetinaNet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive (or negative)
sample is a :math:`C`-vector and the target location for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length-:math:`C`
label vector is set to 1 and all other entries are set to 0; its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
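A small, purely illustrative sketch of the length-:math:`C` classification
target described above (the names below are hypothetical and not part of this OP's API):
.. code-block:: python
    import numpy as np
    C = 10                                 # number of categories, excluding background
    gt_class = 3                           # class of the assigned ground-truth box, in [1, C]
    positive_target = np.zeros(C, dtype='float32')
    positive_target[gt_class - 1] = 1.0    # entry of the assigned class is 1, all others 0
    negative_target = np.zeros(C, dtype='float32')   # all entries are 0 for a negative sample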
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`; if not, :attr:`positive_overlap` is effectively
set to the value of :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is a fake positive. If a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and ground
truth boxes, this layer assigns classification and
regression targets to each anchor; these targets are used to
train the RPN. The classification target is a binary class label (of being
an object or not). Following the paper of Faster-RCNN, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. A non-positive anchor is when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
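A minimal NumPy sketch of the labeling rule above (illustrative only; the OP
additionally performs straddle removal, subsampling and box encoding):
.. code-block:: python
    import numpy as np
    iou = np.array([[0.80, 0.20],
                    [0.10, 0.05],
                    [0.40, 0.75]])          # [num_anchors, num_gt] overlaps
    labels = np.full(iou.shape[0], -1)      # -1: does not contribute to training
    labels[iou.max(axis=1) < 0.3] = 0       # negative: below rpn_negative_overlap
    labels[iou.max(axis=1) >= 0.7] = 1      # (ii) positive: above rpn_positive_overlap
    labels[iou.argmax(axis=0)] = 1          # (i) highest-IoU anchor for each ground-truth box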
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
foreground/background sigmoid score, and M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location are the predicted results of the RPN.
The target_label and target_bbox are the corresponding ground
truth. The predicted_location is a 2-D Tensor with shape
[F, 4], and the shape of target_bbox is the same as the shape of
the predicted_location, where F is the number of foreground
anchors. The predicted_scores is a 2-D Tensor with shape
[F + B, 1], and the shape of target_label is the same as the shape
of the predicted_scores, where B is the number of background
anchors; F and B depend on the input of this operator.
Bbox_inside_weight indicates whether a predicted location is a fake
foreground (fake_fg), and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value of each element in the input tensor :attr:`x`, after which the focal loss is
measured between the sigmoid value and the target label.
The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
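A reference NumPy sketch of the formula above (illustrative only; the helper
name is hypothetical and not part of the API):
.. code-block:: python
    import numpy as np
    def sigmoid_focal_loss_np(x, label, fg_num, gamma=2.0, alpha=0.25):
        # x: [N, C] logits, label: [N, 1] with values in {0, 1, ..., C}, fg_num: scalar
        sigma = 1.0 / (1.0 + np.exp(-x))
        matched = (np.arange(1, x.shape[1] + 1)[None, :] == label)  # (j + 1) == label[i, 0]
        pos = -alpha * (1.0 - sigma) ** gamma * np.log(sigma)
        neg = -(1.0 - alpha) * sigma ** gamma * np.log(1.0 - sigma)
        return np.where(matched, pos, neg) / fg_num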
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
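Conceptually, the OP is roughly equivalent to chaining a box decode with a
multi-class NMS, as in the sketch below (illustrative only; the tensor names
and shapes are hypothetical and follow the Args section):
.. code-block:: python
    import paddle.fluid as fluid
    pb = fluid.data(name='pb', shape=[10, 4], dtype='float32')        # prior boxes
    pbv = fluid.data(name='pbv', shape=[10, 4], dtype='float32')      # prior box variances
    loc = fluid.data(name='loc', shape=[2, 10, 4], dtype='float32')   # location predictions
    conf = fluid.data(name='conf', shape=[2, 10, 21], dtype='float32')
    decoded = fluid.layers.box_coder(prior_box=pb, prior_box_var=pbv,
                                     target_box=loc, code_type='decode_center_size')
    probs = fluid.layers.transpose(fluid.layers.softmax(conf), perm=[0, 2, 1])
    out = fluid.layers.multiclass_nms(bboxes=decoded, scores=probs,
                                      score_threshold=0.01, nms_top_k=400,
                                      keep_top_k=200, nms_threshold=0.3)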
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as the input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in the first dimension are called LoD, and the number of offsets is
N + 1, where N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results; if it is 0, the i-th image has no detected results.
Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected indices, whose type is integer.
The index is an absolute value across batches. No is the same number
as in Out. If the index is used to gather another attribute such as age,
one needs to reshape that input from (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
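            # A quick NumPy cross-check of the value above (illustrative only; assumes
            # box_normalized=True, so areas are computed without a +1 offset):
            a, b = np.array([0.5, 0.5, 2.0, 2.0]), np.array([1.0, 1.0, 2.5, 2.5])
            iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
            ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
            inter = iw * ih
            union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
            print(inter / union)  # ~0.2857143, matching out_iou[0]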
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
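A small NumPy sketch of the encoding schema above (illustrative only; the
intermediate names mirror the symbols in the formulas, and box_normalized=True
is assumed so no +1 offset is added to widths and heights):
.. code-block:: python
    import numpy as np
    prior = np.array([0.0, 0.0, 10.0, 10.0])    # prior box [xmin, ymin, xmax, ymax]
    target = np.array([1.0, 1.0, 9.0, 9.0])     # target box [xmin, ymin, xmax, ymax]
    pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2     # prior box variances
    px, py = (prior[0] + prior[2]) / 2, (prior[1] + prior[3]) / 2
    pw, ph = prior[2] - prior[0], prior[3] - prior[1]
    tx, ty = (target[0] + target[2]) / 2, (target[1] + target[3]) / 2
    tw, th = target[2] - target[0], target[3] - target[1]
    ox = (tx - px) / pw / pxv                   # encoded center x
    oy = (ty - py) / ph / pyv                   # encoded center y
    ow = np.log(np.abs(tw / pw)) / pwv          # encoded width
    oh = np.log(np.abs(th / ph)) / phv          # encoded height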
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. The first is a Variable with shape [M, 4] which holds M groups
of variances, with data type float32 or float64. The second is a list of
4 elements shared by all boxes, with data type float32 or float64.
The third is None, in which case variances are not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image.The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, it contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2-D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched row indices are not
duplicated in ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
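A minimal NumPy sketch of the greedy matching described above for a single
instance (illustrative only; the OP also handles LoD batching and the
'per_prediction' extension):
.. code-block:: python
    import numpy as np
    dist = np.array([[0.1, 0.9, 0.3],
                     [0.8, 0.2, 0.4]])      # [K rows, M columns]
    match = np.full(dist.shape[1], -1)      # ColToRowMatchIndices for one instance
    work = dist.copy()
    for _ in range(min(dist.shape)):
        r, c = np.unravel_index(np.argmax(work), work.shape)
        match[c] = r                        # column c is matched to row r
        work[r, :] = -np.inf                # each row and column is used at most once
        work[:, c] = -np.inf
    # unmatched columns keep -1; here match == [1, 0, -1]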
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which predictions do
not contribute to the training loss.
For each instance, the output `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assume that the i-th instance in `neg_indices` is called `neg_indice`;
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
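A compact NumPy sketch of step 1 for a single instance (illustrative only;
"matched" is taken here to mean match_indices[i][j] != -1, and lod is omitted):
.. code-block:: python
    import numpy as np
    X = np.arange(24, dtype='float32').reshape(2, 3, 4)  # input with M=2, P=3, K=4
    match_indices = np.array([[1, -1, 0]])               # [N=1, P=3]
    mismatch_value = 0.0
    out = np.full((1, 3, 4), mismatch_value, dtype='float32')
    out_weight = np.zeros((1, 3, 1), dtype='float32')
    for j, idx in enumerate(match_indices[0]):
        if idx != -1:                                    # matched prediction
            out[0, j] = X[idx, j]                        # X[lod[i] + id][j % P][0 : K]
            out_weight[0, j] = 1.0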
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2-D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and same data type
with `input`. N and P are the same as they are in `matched_indices`,
and K is the same as it is in the input X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
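The following NumPy sketch is illustrative only (the shapes and values are made up) and mirrors the assignment rule of step 1 for a single instance:
.. code-block:: python
import numpy as np
# M=2 rows in X, P=3 predictions per instance, K=2 target values
x = np.arange(2 * 3 * 2, dtype='float32').reshape(2, 3, 2)
match_indices = np.array([[0, -1, 1]], dtype='int32')  # one instance
lod = [0]  # row offset of the instance
mismatch_value = 0.
out = np.full((1, 3, 2), mismatch_value, dtype='float32')
out_weight = np.zeros((1, 3, 1), dtype='float32')
for j in range(3):
    idx = int(match_indices[0][j])
    if idx >= 0:  # matched prediction
        out[0][j] = x[lod[0] + idx][j % 3]
        out_weight[0][j] = 1.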
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
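In short, for each prior box the overall objective is conf_loss_weight * conf_loss + loc_loss_weight * loc_loss; it is summed over the prior boxes of each image and, when `normalize` is True, divided by the sum of the localization target weights (i.e. the number of matched prior boxes in the mini-batch).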
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example', since currently only the \
mining type `max_negative` is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
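# the normalizer is the summed localization target weight, i.e. the number of matched prior boxes in the mini-batch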
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the range (min_size, max_size), and the boxes are generated
in sequence according to the aspect_ratios.
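For example, with min_sizes=[100.], max_sizes=None and aspect_ratios=[1.] (as in the examples below), each position produces a single prior box, which is why the examples print a [6, 9, 1, 4] box shape.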
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior boxes step across
the width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Obviously, the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
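For example, with densities=[4, 2, 1] and fixed_ratios=[1.] (as in the examples below), each position produces 1 * (4^2 + 2^2 + 1^2) = 21 boxes, which matches both the (1134, 4) = (6 * 9 * 21, 4) flattened shape and the [6, 9, 21, 4] shape printed in the examples.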
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes step across
the width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_sizes` and `max_sizes` are None, `min_sizes` and `max_sizes`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
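For instance, with six inputs, base_size=300, min_ratio=20 and max_ratio=90 (as in Example 1 below), this gives step = 17, min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].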
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
have the same meaning as above. C is the number of classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
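# box has layout [H, W, num_priors, 4]; its third dimension is the prior box count per position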
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for the Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are
ordered by looping over aspect_ratios first and then anchor_sizes.
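For example, with anchor_sizes=[64, 128, 256, 512] and aspect_ratios=[0.5, 1.0, 2.0] (as in the example below), each position of the input feature map produces 4 * 3 = 12 anchors.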
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each RoI into a
rectangular region. A perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes output by GenerateProposalOp and the groundtruth,
this operator samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN processed by generate_proposal_op; these boxes
are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
An instance whose overlap with the groundtruth is greater than fg_thresh is considered a foreground sample;
an instance whose overlap with the groundtruth is greater than bg_thresh_lo and lower than bg_thresh_hi
is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in RoIs, we assign the classification (class label) and regression targets (box label) to it.
Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
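For example, with the defaults batch_size_per_im=256 and fg_fraction=0.25, at most 256 * 0.25 = 64 foreground RoIs are sampled per image.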
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): bbox regression use class agnostic simply which only represent fg and bg boxes.
is_cascade_rcnn(bool): it will filter some bbox crossing the image's boundary when setting True.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
# MASKED: generate_mask_labels function (lines 2737-2883)
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's probability of being
a foreground object, where the boxes are computed from the anchors.
bbox_deltas and scores are the outputs of the RPN. The final proposals
can be used to train the detection net.
For generating proposals, this operation performs the following steps:
1. Transposes and resizes scores and bbox_deltas in size of
(H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposals candidates.
3. Clip boxes to image
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
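At most pre_nms_top_n candidate boxes per image are kept before NMS (step 5) and at most post_nms_top_n proposals are kept after NMS.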
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
num_anchors is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, it will return a 1D Tensor with shape [N, ] that holds the RoI
num of each image in the batch, where N is the number of images. For example, the values [4, 5] mean
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the rcnn model.
'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
For each input box, The formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
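The clipping formula above can be reproduced with plain NumPy (the box and image values here are made up for illustration):
.. code-block:: python
import numpy as np
im_h, im_w = 240., 320.  # height / scale, width / scale
box = np.array([-5., 10., 350., 250.], dtype='float32')  # [xmin, ymin, xmax, ymax]
clipped = np.clip(box, 0., [im_w - 1, im_h - 1, im_w - 1, im_h - 1])
# clipped -> [0., 10., 319., 239.]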
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it's possible
that there is no detection if :attr:`score_threshold` is used at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes that have high scores larger than score_threshold, if this
    threshold is provided, then selects the largest nms_top_k confidence scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have a high IoU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bounding boxes in total are kept
    per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.score = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
                           are total M scores which correspond to M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
                           case with shape [M, C, 4]. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS; it only works when the
                         value is less than 1.0. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
            total number of detections. If there are no detected boxes for any
            image, lod will be set to {1} and Out only contains one value,
            which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
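        For intuition, a pure-NumPy sketch of single-class greedy NMS with the
        adaptive threshold described above; it is illustrative only, not this
        OP's implementation, and it omits multi-class handling, background_label
        and keep_top_k:
        .. code-block:: python
            import numpy as np
            def iou(a, b):
                # a, b: [xmin, ymin, xmax, ymax]
                ix = max(0., min(a[2], b[2]) - max(a[0], b[0]))
                iy = max(0., min(a[3], b[3]) - max(a[1], b[1]))
                inter = ix * iy
                union = ((a[2] - a[0]) * (a[3] - a[1]) +
                         (b[2] - b[0]) * (b[3] - b[1]) - inter)
                return inter / union if union > 0 else 0.
            def greedy_nms(boxes, scores, score_threshold, nms_threshold, nms_eta):
                order = [i for i in np.argsort(-scores) if scores[i] > score_threshold]
                keep, thr = [], nms_threshold
                while order:
                    cur, rest = order[0], []
                    keep.append(cur)
                    for i in order[1:]:
                        if iou(boxes[cur], boxes[i]) <= thr:
                            rest.append(i)       # box survives this round
                        elif nms_eta < 1.0 and thr > 0.5:
                            thr *= nms_eta       # adaptive threshold decay
                    order = rest
                return keep
            boxes = np.array([[2., 3., 7., 5.], [3., 4., 8., 5.]])
            scores = np.array([0.4, 0.1])        # scores of a single class
            print(greedy_nms(boxes, scores, 0.0, 0.3, 1.0))  # -> [0]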
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
    **Locality-Aware NMS**
    `Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
    suppression (LANMS) on boxes and scores.
    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes that have high scores larger than score_threshold,
    if this threshold is provided, then selects the largest nms_top_k confidence scores
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
    IoU overlap with already selected boxes by adaptive threshold NMS based on the
    parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bounding boxes in total are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
                           boxes. Now only 1 class is supported. For each category
                           there are total M scores which correspond to M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS; it only works when the
                         value is less than 1.0. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
            total number of detections. If there are no detected boxes for any
            image, lod will be set to {1} and Out only contains one value,
            which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
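        For intuition, a pure-NumPy sketch of the score-weighted merging idea
        behind locality-aware NMS (an illustrative simplification of the
        procedure in the paper above, not this OP's implementation):
        .. code-block:: python
            import numpy as np
            def weighted_merge(box_a, score_a, box_b, score_b):
                # Merge two overlapping boxes by a score-weighted average of their
                # coordinates; the merged score is the sum of the two scores.
                merged = (score_a * box_a + score_b * box_b) / (score_a + score_b)
                return merged, score_a + score_b
            box_a = np.array([2., 3., 7., 5.])
            box_b = np.array([3., 4., 8., 5.])
            merged_box, merged_score = weighted_merge(box_a, 0.6, box_b, 0.2)
            print(merged_box, merged_score)   # merged box leans toward the higher-scored box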
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
    assert shape[
        1] == 1, "locality_aware_nms only supports one class; the score Tensor shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bounding boxes in total are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
                           are total M scores which correspond to M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
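        For intuition, a pure-NumPy sketch of the linear score decay used in
        Matrix NMS, following the SOLOv2 formulation; it is illustrative only,
        not this OP's implementation, and the Gaussian variant replaces the
        linear kernel with an exponential one whose exact parameterization may
        differ:
        .. code-block:: python
            import numpy as np
            def matrix_nms_decay(ious, scores):
                # ious: [M, M] pairwise IoU matrix for boxes of one class;
                # scores: [M], assumed sorted in descending order.
                n = len(scores)
                iou = np.triu(ious, k=1)              # keep only (higher-scored i, lower-scored j) pairs
                compensate = iou.max(axis=0)          # per box: max IoU with any higher-scored box
                comp_mat = np.tile(compensate[:, None], (1, n))  # comp_mat[i, j] = compensation of suppressor i
                decay = (1.0 - iou) / (1.0 - comp_mat)           # linear decay kernel
                return scores * decay.min(axis=0)                # decayed score per box
            ious = np.array([[1.0, 0.6, 0.1],
                             [0.6, 1.0, 0.2],
                             [0.1, 0.2, 1.0]])
            scores = np.array([0.9, 0.8, 0.7])        # already sorted
            print(matrix_nms_decay(ious, scores))     # the heavily-overlapped box decays most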
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of proposals, we return an
    array which indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
        multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
        and data type float32 or float64. The list length is
        max_level - min_level + 1, holding the proposals of each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
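        For intuition, a pure-NumPy sketch of the level-assignment formula
        above; it is illustrative only, assumes a base-2 logarithm as in the
        FPN paper, and clips the result to [min_level, max_level], which may
        differ in detail from this OP:
        .. code-block:: python
            import numpy as np
            def assign_fpn_level(rois, min_level=2, max_level=5,
                                 refer_level=4, refer_scale=224):
                # rois: [N, 4] in (xmin, ymin, xmax, ymax) format.
                w = rois[:, 2] - rois[:, 0]
                h = rois[:, 3] - rois[:, 1]
                roi_scale = np.sqrt(w * h)                      # sqrt(BBoxArea)
                level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
                return np.clip(level, min_level, max_level).astype(np.int64)
            rois = np.array([[0., 0., 112., 112.],              # small roi -> low level
                             [0., 0., 448., 448.]])             # large roi -> high level
            print(assign_fpn_level(rois))                       # -> [3 5]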
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
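        For intuition, a pure-NumPy sketch of the collect-and-select steps
        listed above for a single image; it is illustrative only, not this
        OP's implementation, and it ignores LoD and the final re-sort by
        batch_id:
        .. code-block:: python
            import numpy as np
            def collect_proposals(multi_rois, multi_scores, post_nms_top_n):
                # multi_rois: list of [Ni, 4] arrays; multi_scores: list of [Ni, 1] arrays.
                rois = np.concatenate(multi_rois, axis=0)           # 2. concat multi-level RoIs
                scores = np.concatenate(multi_scores, axis=0).reshape(-1)
                top = np.argsort(-scores)[:post_nms_top_n]          # 3. sort and select top scores
                return rois[top]                                    # 4. gather RoIs by selected indices
            level_a = np.random.rand(6, 4).astype('float32')
            level_b = np.random.rand(4, 4).astype('float32')
            scores_a = np.random.rand(6, 1).astype('float32')
            scores_b = np.random.rand(4, 1).astype('float32')
            print(collect_proposals([level_a, level_b], [scores_a, scores_b], 5).shape)  # (5, 4)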
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
    Given the RoIs and corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.
    Please note the data format of the ground-truth segmentation; assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type, and its LoD level is 3.
            Usually users do not need to understand LoD;
            they only need to return the correct data format in the reader.
            The LoD[0] represents the ground-truth object number of
            each instance. LoD[1] represents the segmentation count of each
            object. LoD[2] represents the polygon number of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
            type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
    1. Anchors are assigned to ground-truth boxes when they have the highest IoU
    overlap with a ground-truth box.
    2. Anchors are assigned to ground-truth boxes when they have an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.
    3. Anchors are assigned to background when their IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0; its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
            corresponding box is a crowd and it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector, which contains the height and width
            of the network input along with the factor scaling the original image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
            location prediction belonging to positive samples. :math:`F` is the
            number of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
            represents whether a positive sample is a fake positive; if a positive
            sample is a fake positive, the corresponding entries in
            :attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
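        For intuition, a pure-NumPy sketch of how the length-:math:`C`
        classification target described above can be built from assigned
        labels; it is illustrative only, not this OP's implementation:
        .. code-block:: python
            import numpy as np
            num_classes = 10
            # Assigned labels for 4 sampled anchors: a value in [1, C] marks a positive
            # sample of that class, 0 marks a negative (background) sample.
            assigned = np.array([3, 0, 10, 0])
            target = np.zeros((len(assigned), num_classes), dtype='float32')
            pos = assigned > 0
            target[np.arange(len(assigned))[pos], assigned[pos] - 1] = 1.0
            print(target.sum(axis=1))   # positives have a single 1, negatives are all zeros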
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
    Given the Intersection-over-Union (IoU) overlap
    between anchors and ground-truth boxes, this layer assigns classification and
    regression targets to each anchor; these targets are used to
    train the RPN. The classification target is a binary class label (of being
    an object or not). Following the Faster-RCNN paper, the positive labels
    are two kinds of anchors: (i) the anchor/anchors with the highest IoU
    overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
    higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
    that a single ground-truth box may assign positive labels to multiple
    anchors. An anchor is labeled negative when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
            foreground and background sigmoid, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
        anchors; F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
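        For intuition, a pure-NumPy sketch of the positive/negative labeling
        rules described above, starting from a precomputed anchor-to-gt IoU
        matrix; it is illustrative only and omits the sampling, straddle
        removal and box encoding done by this OP:
        .. code-block:: python
            import numpy as np
            def label_anchors(iou, pos_overlap=0.7, neg_overlap=0.3):
                # iou: [num_anchors, num_gt] IoU matrix.
                labels = np.full(iou.shape[0], -1, dtype='int64')   # -1: ignored
                max_per_anchor = iou.max(axis=1)
                labels[max_per_anchor < neg_overlap] = 0             # negatives
                labels[max_per_anchor >= pos_overlap] = 1            # rule (ii) positives
                labels[iou.argmax(axis=0)] = 1                       # rule (i): best anchor per gt box
                return labels
            iou = np.array([[0.75, 0.10],
                            [0.40, 0.55],
                            [0.40, 0.35],
                            [0.10, 0.02]])
            print(label_anchors(iou))   # -> [ 1  1 -1  0]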
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which the focal loss is
    measured between the sigmoid value and the target label.
    The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
        - \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
                    # Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
                    # find the elements in label which are greater than or equal to 1, then
                    # compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
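        A pure-NumPy sketch of the element-wise formula above, useful for
        sanity-checking the OP on small inputs; it is illustrative only and
        the input values below are assumptions:
        .. code-block:: python
            import numpy as np
            def sigmoid_focal_loss_np(x, label, fg_num, gamma=2.0, alpha=0.25):
                # x: [N, C] logits; label: [N, 1] with 0 for background and j+1 for class j.
                p = 1.0 / (1.0 + np.exp(-x))
                n, c = x.shape
                cls_ids = np.tile(np.arange(1, c + 1), (n, 1))      # (j + 1) for each column
                is_target = (cls_ids == label)                      # label broadcasts over columns
                pos = -alpha * (1 - p) ** gamma * np.log(p)
                neg = -(1 - alpha) * p ** gamma * np.log(1 - p)
                return np.where(is_target, pos, neg) / fg_num
            x = np.random.randn(4, 10)
            label = np.array([[3], [0], [7], [1]])                  # 0 means background
            print(sigmoid_focal_loss_np(x, label, fg_num=3).shape)  # (4, 10)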
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index which type is Integer.
The index is the absolute value cross batches. No is the same number
            as Out. If the index is used to gather other attributes such as age,
            one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
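# multiclass_nms consumes class-major scores of shape [N, C, M], so apply softmax
# over the class dimension and transpose the [N, M, C] scores before running NMS.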
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}. The data type is float32 or float64.
y (Variable): ${y_comment}. The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}. The data type is the same as x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. The first is a Variable with shape [M, 4] which holds M groups
of variances, with data type float32 or float64. The second is a list of
4 elements shared by all boxes, with data type float32 or float64.
The third is None, in which case variances are not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
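# prior_box_var may be given as a Variable (wired in as the PriorBoxVar input) or as
# a Python list of 4 floats (attached as the 'variance' attribute); other types are rejected.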
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
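# GTScore is an optional input; when provided it carries the per-box mixup scores
# described in the docstring.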
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
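# yolo_box emits decoded boxes of shape [N, M, 4] and per-class scores of shape
# [N, M, class_num], both with the same dtype as x.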
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
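# Reuse caller-provided accumulation states (out_states/input_states) when given so
# mAP can be accumulated across mini-batches; otherwise create fresh variables.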
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity does not match
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
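# ColToRowMatchIndices stores, for each column, the matched row index (-1 if the
# column is unmatched); ColToRowMatchDist stores the corresponding distance.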
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights for each prediction. The weights specify which predictions do
not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] >= 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
as `input`. N and P are the same as they are in `matched_indices`, and
K is the same as in the input X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
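# `out` receives targets gathered from `input` according to matched_indices, while
# `out_weight` marks which predictions contribute to the training loss.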
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number. The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of classes. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example'; currently only the
`max_negative` mining type is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
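# conf_shape captures the runtime shape of `confidence`; its first two dims are used
# below as actual_shape to reshape the per-box losses back to [N, Np].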
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
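# mine_hard_examples selects hard negative examples based on the confidence loss and
# writes the updated match indices used for target assignment below.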
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box is within the interval (min_size, max_size), and the boxes are generated
in sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the prior boxes step across
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weight order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_priors is the total box count of each position of input
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
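# max_sizes is optional; only attach it as an op attribute when a valid (positive)
# value is supplied.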
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
The number of fixed_sizes must be equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = \sum(N\_fixed\_ratios \times densities\_i^2)
N_density_prior_box is the number of density prior boxes and N_fixed_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes step across
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
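# The generated boxes and variances are fixed priors, so stop_gradient is set on
# both outputs after the op is appended.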
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For the details of this algorithm,
please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_sizes` and `max_sizes` are None, the `min_sizes` and `max_sizes`
are calculated from `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should be equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should be equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weight order of the
convolution layer that follows, but does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'steps should be the same.')
step_w = steps
step_h = steps
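# For each input feature map: generate prior boxes with prior_box(), then predict
# 4 location offsets and num_classes confidences per prior box via conv2d heads.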
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
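# Multiple feature maps: flatten each map's boxes/variances and concatenate them,
# and concatenate the per-map location/confidence predictions along axis 1.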
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are
ordered by looping over aspect_ratios first and then anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, an anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
            Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
            with a layout of [H, W, num_anchors, 4].
            H is the height of input, W is the width of input,
            num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
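# --- Editor's note: an illustrative sketch of how the anchors of a single
# position can be enumerated, assuming the conventional parameterization
# implied by the docstring above (area == anchor_size ** 2,
# aspect_ratio == height / width, center at (index + offset) * stride). It is
# meant for intuition only and is not guaranteed to match the C++ kernel
# bit-for-bit; names prefixed with `_example_` are hypothetical.
def _example_anchors_at(col, row, anchor_sizes, aspect_ratios,
                        stride=(16.0, 16.0), offset=0.5):
    cx = (col + offset) * stride[0]
    cy = (row + offset) * stride[1]
    anchors = []
    for ratio in aspect_ratios:        # aspect_ratios loop first, per the docstring
        for size in anchor_sizes:      # then anchor_sizes loop
            half_w = size / (2.0 * ratio ** 0.5)   # w * h == size ** 2
            half_h = size * ratio ** 0.5 / 2.0     # h / w == ratio
            anchors.append((cx - half_w, cy - half_h, cx + half_w, cy + half_h))
    return anchors  # len == len(aspect_ratios) * len(anchor_sizes)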
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
    ROI perspective transform op applies a perspective transform to map each roi into a
    rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
    Given the bounding boxes produced by GenerateProposalOp and the ground truth,
    this operator samples foreground and background boxes and computes loss targets.
    RpnRois are the output boxes of the RPN processed by generate_proposals. These
    boxes are combined with the ground-truth boxes and sampled according to
    batch_size_per_im and fg_fraction.
    An instance whose overlap with a ground-truth box is greater than fg_thresh is considered a foreground sample.
    An instance whose overlap with a ground-truth box is greater than bg_thresh_lo and lower than bg_thresh_hi
    is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
    For each box in RoIs, we assign the classification (class label) and regression targets (box label) to it.
    Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): If True, bbox regression is class-agnostic and only distinguishes foreground and background boxes.
        is_cascade_rcnn(bool): If True, bboxes crossing the image's boundary will be filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
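# --- Editor's note: a NumPy sketch of the sampling rule described in the
# docstring above (foreground if the max ground-truth overlap is at least
# fg_thresh, background if that overlap falls in [bg_thresh_lo, bg_thresh_hi),
# with foreground capped at batch_size_per_im * fg_fraction). It ignores LoD,
# crowd handling, random shuffling and target computation, so it is purely
# illustrative; names prefixed with `_example_` are hypothetical.
def _example_sample_rois(max_overlaps, batch_size_per_im=256, fg_fraction=0.25,
                         fg_thresh=0.25, bg_thresh_hi=0.5, bg_thresh_lo=0.0):
    import numpy as np
    max_overlaps = np.asarray(max_overlaps, dtype='float32')
    fg_inds = np.where(max_overlaps >= fg_thresh)[0]
    bg_inds = np.where((max_overlaps < bg_thresh_hi) &
                       (max_overlaps >= bg_thresh_lo))[0]
    fg_num = min(int(batch_size_per_im * fg_fraction), fg_inds.size)
    bg_num = min(batch_size_per_im - fg_num, bg_inds.size)
    return fg_inds[:fg_num], bg_inds[:bg_num]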
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch produces a :math:`K \\times M^{2}`
    dimensional target for each foreground RoI, which encodes K binary masks
    of resolution M x M, one for each of the K classes. These mask targets are
    used to compute the loss of the mask branch.
    Please note the data format of the ground-truth segmentation: assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type; its LoD level is 3.
            Usually users do not need to understand LoD, but they should return
            the correct data format in the reader.
            LoD[0] represents the number of ground-truth objects of
            each instance. LoD[1] represents the segmentation count of each
            object. LoD[2] represents the number of polygons of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
            type. R is the total number of RoIs, each element is a bounding
            box in (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
    This operation proposes RoIs according to each box's probability of
    being a foreground object; the boxes are calculated from the anchors.
    bbox_deltas and scores are the outputs of the RPN. The final proposals
    can be used to train the detection net.
    For generating proposals, this operation performs the following steps:
    1. Transposes and reshapes scores and bbox_deltas to shapes of
       (H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposals candidates.
3. Clip boxes to image
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
            A is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When set to True, a 1D Tensor with shape [N, ] is also returned, containing the RoI
            count of each image in the batch, where N is the number of images. For example, the value [4, 5] means
            the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
            'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
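# --- Editor's note: a condensed NumPy sketch of steps 2-4 above (decode box
# deltas into proposals, clip to the image, drop small boxes) for one image,
# assuming the standard Faster R-CNN delta encoding (dx, dy, dw, dh) and
# unnormalized [xmin, ymin, xmax, ymax] anchors; NMS (step 5) is omitted.
# Illustrative only; names prefixed with `_example_` are hypothetical.
def _example_decode_clip_filter(anchors, deltas, im_h, im_w, min_size):
    import numpy as np
    anchors = np.asarray(anchors, dtype='float32')
    deltas = np.asarray(deltas, dtype='float32')
    aw = anchors[:, 2] - anchors[:, 0] + 1.0
    ah = anchors[:, 3] - anchors[:, 1] + 1.0
    acx = anchors[:, 0] + 0.5 * aw
    acy = anchors[:, 1] + 0.5 * ah
    cx = deltas[:, 0] * aw + acx                   # step 2: decode centers
    cy = deltas[:, 1] * ah + acy
    w = np.exp(deltas[:, 2]) * aw                  # and sizes
    h = np.exp(deltas[:, 3]) * ah
    boxes = np.stack([cx - 0.5 * w, cy - 0.5 * h,
                      cx + 0.5 * w, cy + 0.5 * h], axis=1)
    boxes[:, 0::2] = np.clip(boxes[:, 0::2], 0, im_w - 1)   # step 3: clip x
    boxes[:, 1::2] = np.clip(boxes[:, 1::2], 0, im_h - 1)   # step 3: clip y
    keep = ((boxes[:, 2] - boxes[:, 0]) >= min_size) & \
           ((boxes[:, 3] - boxes[:, 1]) >= min_size)        # step 4: drop small boxes
    return boxes[keep]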
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
    For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
        im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
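# --- Editor's note: a NumPy sketch of the clipping formula documented above,
# for intuition only. im_info_row is (height, width, scale), and im_h / im_w
# are recovered exactly as in the docstring; names prefixed with `_example_`
# are hypothetical.
def _example_box_clip(boxes, im_info_row):
    import numpy as np
    height, width, scale = im_info_row
    im_h, im_w = round(height / scale), round(width / scale)
    boxes = np.array(boxes, dtype='float32')
    boxes[..., 0::2] = np.clip(boxes[..., 0::2], 0, im_w - 1)   # xmin, xmax
    boxes[..., 1::2] = np.clip(boxes[..., 1::2], 0, im_h - 1)   # ymin, ymax
    return boxes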
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions; this OP gets the detection results by
    performing the following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS, and the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
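# --- Editor's note: a NumPy sketch of step 1 above for a single FPN level of a
# single image: keep at most nms_top_k top-scoring predictions after
# thresholding the per-class confidence at score_threshold. Box decoding and
# the final multi-class NMS are omitted. Illustrative only; names prefixed
# with `_example_` are hypothetical.
def _example_select_per_level(level_scores, score_threshold=0.05, nms_top_k=1000):
    import numpy as np
    scores = np.asarray(level_scores, dtype='float32')   # shape [Mi, C]
    best = scores.max(axis=1)                            # best class score per box
    keep = np.where(best > score_threshold)[0]
    order = keep[np.argsort(-best[keep])]                # sort survivors by score
    return order[:nms_top_k]                             # indices into this level's boxes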
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes whose scores are larger than score_threshold, if this threshold is
    provided, and then keeps the largest nms_top_k confidence scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have a high IOU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k total bboxes are kept
    per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes.The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS, used to adjust nms_threshold. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
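# --- Editor's note: the worked example in the docstring above states
# iou(box1, box2) = 4/11. The sketch below re-derives that value with plain
# Python; `_example_iou` is a hypothetical helper added for illustration only.
def _example_iou(box_a, box_b):
    """IoU of two unnormalized (xmin, ymin, xmax, ymax) boxes."""
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, xb - xa) * max(0.0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)
# _example_iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0)) == 4.0 / 11.0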
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Local Aware NMS**
`Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
suppression (LANMS) on boxes and scores.
    Firstly, this operator merges boxes and scores according to their IOU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes whose scores are larger than score_threshold,
    if this threshold is provided, and then keeps the largest nms_top_k confidence scores
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
    IOU overlap with already selected boxes by adaptive threshold NMS based on the
    parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k total bboxes are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS, used to adjust nms_threshold. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided); then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k total bboxes are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
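# --- Editor's note: a NumPy sketch of the Matrix NMS score decay, following
# the formulation of the SOLOv2 paper (arXiv:2003.10152) that this scheme is
# based on: each box's score is decayed by its most suppressive higher-scoring
# box, normalized by how suppressed that box already is. This is a conceptual
# sketch (it assumes IoUs strictly below 1 and may not be bit-exact with the
# kernel); names prefixed with `_example_` are hypothetical.
def _example_matrix_nms_decay(ious, use_gaussian=False, gaussian_sigma=2.0):
    import numpy as np
    # `ious` is an [M, M] IoU matrix of boxes already sorted by descending score.
    iou = np.triu(np.asarray(ious, dtype='float32'), k=1)   # keep pairs with i < j
    compensate = iou.max(axis=0)                            # how suppressed each box already is
    if use_gaussian:
        decay = np.exp(-gaussian_sigma * (iou ** 2 - compensate[:, None] ** 2))
    else:
        decay = (1.0 - iou) / (1.0 - compensate[:, None])
    return decay.min(axis=0)    # multiply each sorted score by its decay factor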
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of proposals, we return an
    array which indicates the original index of the rois in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
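# --- Editor's note: a NumPy sketch of the level-assignment formula in the
# docstring above, assuming a base-2 logarithm as in the FPN paper (the kernel
# may additionally add a small epsilon before taking the log). Illustrative
# only; names prefixed with `_example_` are hypothetical.
def _example_fpn_level(roi, min_level=2, max_level=5, refer_level=4,
                       refer_scale=224):
    import numpy as np
    xmin, ymin, xmax, ymax = roi
    roi_scale = np.sqrt((xmax - xmin) * (ymax - ymin))      # sqrt(BBoxArea)
    level = int(np.floor(np.log2(roi_scale / refer_scale) + refer_level))
    return min(max(level, min_level), max_level)            # clamp to [min_level, max_level]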
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
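# --- Editor's note: a NumPy sketch of steps 2-4 above for a single image
# (concatenate all levels, sort by score, keep post_nms_top_n). The LoD /
# batch-id re-sorting of step 5 is omitted. Illustrative only; names prefixed
# with `_example_` are hypothetical.
def _example_collect_proposals(rois_per_level, scores_per_level, post_nms_top_n):
    import numpy as np
    rois = np.concatenate(rois_per_level, axis=0)            # step 2: [sum(Ni), 4]
    scores = np.concatenate(scores_per_level, axis=0).reshape(-1)
    order = np.argsort(-scores)[:post_nms_top_n]             # step 3: top scores
    return rois[order], scores[order]                        # step 4: gather RoIs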
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
1. Anchors are assigned to ground-truth boxes when it has the highest IoU
overlap with a ground-truth box.
2. Anchors are assigned to ground-truth boxes when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. Anchors are assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0; its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector containing the height and width
of the network input along with the scale factor from the original image
to the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`; if it is not, :attr:`negative_overlap` is used
as the effective value of :attr:`positive_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative samples, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is a fake positive; if a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
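The returned tensors are typically combined into the RetinaNet training
objective; a minimal, illustrative sketch continuing the example above
(the plain sum reduction and the reuse of bbox_inside_weight as both
inside and outside weight are assumptions, not part of this OP):
.. code-block:: python
fg_sum = fluid.layers.reduce_sum(fg_num)
cls_loss = fluid.layers.sigmoid_focal_loss(
score_pred, score_target, fg_sum, gamma=2.0, alpha=0.25)
reg_loss = fluid.layers.smooth_l1(
x=loc_pred, y=loc_target,
inside_weight=bbox_inside_weight,
outside_weight=bbox_inside_weight)
total_loss = fluid.layers.reduce_sum(cls_loss) + fluid.layers.reduce_sum(reg_loss)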
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and ground
truth boxes, this layer assigns classification and regression targets to
each anchor; these targets are used to train the RPN. The classification
target is a binary class label (object or not object). Following the
Faster-RCNN paper, positive labels are assigned to two kinds of anchors:
(i) the anchor/anchors with the highest IoU overlap with a ground-truth
box, or (ii) an anchor that has an IoU overlap higher than
rpn_positive_overlap (0.7) with any ground-truth box. Note that a single
ground-truth box may assign positive labels to multiple anchors. An anchor
is assigned a negative label when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted objectness confidence. N is the batch size, 1 corresponds to the
foreground/background sigmoid output, and M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth is crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
and the 3 values are the image height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
example. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location are the predicted results of the RPN.
The target_label and target_bbox are the corresponding ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is the same as the shape of
the predicted_location, where F is the number of foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is the same as the shape
of the predicted_scores, where B is the number of background
anchors; F and B depend on the input of this operator.
bbox_inside_weight represents whether the predicted location is a fake
foreground (fake_fg) or not, and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
score, loc, score_target, loc_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
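The outputs are typically turned into the RPN objectness and regression
losses; a minimal, illustrative sketch continuing the example above (the
cast of the int32 labels and the plain sum reduction are assumptions, not
part of this OP):
.. code-block:: python
score_target_f = fluid.layers.cast(score_target, 'float32')
score_target_f.stop_gradient = True
cls_loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=score, label=score_target_f)
reg_loss = fluid.layers.smooth_l1(
x=loc, y=loc_target,
inside_weight=inside_weight,
outside_weight=inside_weight)
rpn_loss = fluid.layers.reduce_sum(cls_loss) + fluid.layers.reduce_sum(reg_loss)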
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which the focal loss is
measured between the sigmoid value and the target label.
The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j + 1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j + 1) != label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch. For example, in object detection the samples are anchor boxes and :math:`N`
is the total number of positive and negative samples in a mini-batch; in image classification
the samples are images and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During the evaluation or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
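For a quick, self-contained shape check, the OP can also be applied
directly to a logits tensor; a minimal sketch (the tensor names and the
constant foreground count are illustrative assumptions):
.. code-block:: python
demo_logits = fluid.data(name='demo_logits',
shape=[None, num_classes], dtype='float32')
demo_label = fluid.data(name='demo_label', shape=[None, 1], dtype='int32')
demo_fg_num = fluid.layers.fill_constant(shape=[1], value=5, dtype='int32')
demo_loss = fluid.layers.sigmoid_focal_loss(demo_logits, demo_label,
demo_fg_num, gamma=2.0, alpha=0.25)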
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non-maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in the first dimension are called LoD, the number of offsets is
N + 1, where N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results; if it is 0, the i-th image has no detected results.
Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected indices, whose type is integer.
The index is an absolute value across batches. No is the same number
as in Out. If the index is used to gather other attributes such as age,
one needs to first reshape the input of shape (N, M, 1) to (N * M, 1), where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
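When the selected indices are not needed, return_index can be left as
False and only the detection LoDTensor is returned; a minimal sketch
reusing the inputs above:
.. code-block:: python
nmsed_outs_only = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv)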
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}. The data type is float32 or float64.
y (Variable): ${y_comment}. The data type is float32 or float64.
box_normalized(bool): Whether to treat the prior box as a normalized box.
True by default.
Returns:
Variable: ${out_comment}. The data type is the same as that of x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema is described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The Decoding schema is described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is a Variable with shape [M, 4] which holds M groups of
variances and whose data type is float32 or float64. The second is a
list consisting of 4 elements shared by all boxes, with data type
float32 or float64. The third is None, in which case the variance is
not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether to treat the prior box as a normalized box.
True by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
property; None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
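prior_box_var may also be passed as a Variable with shape [M, 4] instead
of a Python list; a minimal sketch of the encode case under that
assumption (the tensor name is illustrative):
.. code-block:: python
prior_box_var_t = fluid.data(name='prior_box_var_t',
shape=[512, 4],
dtype='float32')
output_encode_v = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=prior_box_var_t,
target_box=target_box_encode,
code_type="encode_center_size")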
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x, y is the center coordinate of boxes, w, h are the
width and height, and x, y, w, h should be divided by
the input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for the user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
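The returned loss has shape [N]; for optimization it is usually reduced
to a scalar, e.g. (an illustrative sketch, not part of this OP):
.. code-block:: python
avg_loss = fluid.layers.reduce_mean(loss)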
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for the user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes, scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
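The decoded boxes and per-class scores are usually post-processed with
multi-class NMS; a minimal sketch continuing the example above (the
thresholds are illustrative assumptions, and the scores are transposed to
[N, class_num, M] as expected by multiclass_nms):
.. code-block:: python
scores_t = fluid.layers.transpose(scores, perm=[0, 2, 1])
pred = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores_t,
score_threshold=0.01,
nms_top_k=400,
keep_top_k=100,
nms_threshold=0.45,
background_label=-1)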
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In brief, this algorithm matches the best (maximum distance) row entity
to each column entity, and the matched indices are not duplicated in each
row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
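For intuition, a small, illustrative walk-through of the greedy procedure
described above (following the -1 convention documented in this API):
.. code-block:: text
dist_matrix = [[0.7, 0.2, 0.1],
[0.3, 0.8, 0.4]] # 2 row entities, 3 column entities
# step 1: the global maximum 0.8 matches column 1 to row 1
# step 2: the remaining maximum 0.7 matches column 0 to row 0
# column 2 has no row left, so it is unmatched
ColToRowMatchIndices = [[0, 1, -1]]
ColToRowMatchDist = [[0.7, 0.8, -1.0]]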
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider using :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this
property; None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which predictions do
not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assuming that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assuming that the i-th instance in `neg_indices` is called `neg_indice`,
for the i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of the column is not matched to any entity of the row in
the i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
as `input`. N and P are the same as they are in `matched_indices`,
and K is the same as it is in the input X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
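When hard-negative indices are available (for example from a hard example
mining step), they can be passed through negative_indices; a minimal
sketch under that assumption (the extra data layer is illustrative):
.. code-block:: python
neg_id = fluid.data(
name='neg_indices',
shape=[None, 1],
dtype='int32',
lod_level=1)
trg_neg, trg_neg_weight = fluid.layers.target_assign(
x,
matched_id,
negative_indices=neg_id,
mismatch_value=0)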
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encode the bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of the mini-batch input. The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
the mini-batch input, and 1 is the class dimension. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'; currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1]. N and Np are the same as they are in
`location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example'; currently only the mining \
type `max_negative` is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
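The returned loss has shape [N * Np, 1]; it is usually reduced to a
scalar before optimization, e.g. (an illustrative sketch, not part of
this OP):
.. code-block:: python
avg_loss = fluid.layers.reduce_mean(loss)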
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
    This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box lies in the (min_size, max_size) interval, and the boxes are generated
    in sequence according to the aspect_ratios.
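    As a rough sketch of the per-position box count (an assumption for the
    common case, ignoring duplicate ratios produced by flipping):

    .. code-block:: python

        min_sizes = [100.]
        max_sizes = []        # None in the example below
        aspect_ratios = [1.]  # flip would add 1./r for every ratio r != 1.
        num_priors = len(aspect_ratios) * len(min_sizes) + len(max_sizes)
        # num_priors == 1, matching the (6, 9, 1, 4) output in the example below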
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior boxes step across
            height or width of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4].
            H is the height of input, W is the width of input,
            num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple with length 2, '
                         '(step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
    This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
    algorithm. Each position of the input produces N prior boxes, where N is
    determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator; the grid points are determined by densities, and the count
    of density prior boxes is determined by fixed_sizes and fixed_ratios.
    Note that the number of fixed_sizes must be equal to the number of densities.
For densities_i in densities:
.. math::
        N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
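    For instance, with the settings used in the examples below
    (densities=[4, 2, 1], fixed_ratios=[1.]), the per-position box count is:

    .. code-block:: python

        densities = [4, 2, 1]
        fixed_ratios = [1.]
        num_priors = sum(len(fixed_ratios) * d ** 2 for d in densities)
        # num_priors == 16 + 4 + 1 == 21, matching the example output shapes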
Parameters:
        input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
        image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
        steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes step across
            height or width of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple with length 2, '
                         '(step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
    Based on the SSD (Single Shot MultiBox Detector) algorithm, this op generates
    prior boxes, regression locations and classification confidences on multiple
    input feature maps, then concatenates and outputs the results. For the details
    of this algorithm, please refer to section 2.2 of the SSD paper
    `SSD: Single Shot MultiBox Detector <https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
        base_size(int): the base_size is the input image size. When len(inputs) > 2
            and `min_sizes` and `max_sizes` are None, `min_sizes` and `max_sizes`
            are calculated from `base_size`, `min_ratio` and `max_ratio`. The
            formula is as follows (a worked example is given after this parameter list):
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
        min_sizes(list|tuple|None): If `len(inputs) <= 2`,
            min_sizes must be set up, and the length of min_sizes
            should be equal to the length of inputs. Default: None.
        max_sizes(list|tuple|None): If `len(inputs) <= 2`,
            max_sizes must be set up, and the length of max_sizes
            should be equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
        pad(int|list|tuple): The padding of conv2d. Default: 0.
        stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
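    With the settings of Example 1 below (base_size=300, min_ratio=20,
    max_ratio=90 and six input feature maps), the formula shown under
    ``base_size`` works out as follows:

    .. code-block:: python

        import math
        base_size, min_ratio, max_ratio, num_layer = 300, 20, 90, 6
        min_sizes, max_sizes = [], []
        step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))
        for ratio in range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes  # [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
        max_sizes = [base_size * .20] + max_sizes  # [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]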
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
    Generate anchors for the Faster R-CNN algorithm.
    Each position of the input produces N anchors, where
    N = size(anchor_sizes) * size(aspect_ratios). The generated anchors are
    ordered by looping over aspect_ratios first and then anchor_sizes.
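    For example, with the values used in the example below:

    .. code-block:: python

        anchor_sizes = [64., 128., 256., 512.]
        aspect_ratios = [0.5, 1.0, 2.0]
        num_anchors = len(anchor_sizes) * len(aspect_ratios)
        # num_anchors == 12 anchors per input position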
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
            Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
        Variances(Variable): The expanded variances of anchors
            with a layout of [H, W, num_anchors, 4].
            H is the height of input, W is the width of input,
            num_anchors is the box count of each position.
            Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple with length 2, '
                         '(stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
    ROI perspective transform op applies a perspective transform to map each RoI
    into a rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
    Given the bounding boxes produced by GenerateProposalOp and the ground truth,
    this operator samples foreground and background boxes and computes their loss targets.
    RpnRois are the output boxes of the RPN processed by generate_proposals; they are
    combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
    An instance whose overlap with the ground truth is greater than fg_thresh is considered a foreground sample.
    An instance whose overlap with the ground truth is greater than bg_thresh_lo and lower than bg_thresh_hi
    is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs), random sampling
    ensures that the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
    For each box in the RoIs, a classification target (class label) and a regression target (box label) are assigned.
    Finally, BboxInsideWeights and BboxOutsideWeights specify whether each box contributes to the training loss.
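    As a small illustration of the foreground cap described above (using the
    default arguments):

    .. code-block:: python

        batch_size_per_im, fg_fraction = 256, 0.25
        max_fg_per_image = int(batch_size_per_im * fg_fraction)
        # at most 64 foreground RoIs are kept per image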
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): If True, the bbox regression is class-agnostic and only distinguishes foreground and background boxes.
        is_cascade_rcnn(bool): If set to True, bboxes crossing the image boundary are filtered out.
Returns:
tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.
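    For example, with the values used in the example below (num_classes=81,
    resolution=14), each foreground RoI gets a mask target of length
    K * M * M:

    .. code-block:: python

        num_classes, resolution = 81, 14
        mask_target_len = num_classes * resolution * resolution
        # mask_target_len == 15876, i.e. the second dim of mask_int32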
    Please note the data format of the ground-truth segmentation below; assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
        gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
            float32 data type; its LoD level is 3.
            Usually users do not need to understand LoD,
            but they should return the correct data format in the reader.
            LoD[0] represents the number of ground-truth objects of
            each instance. LoD[1] represents the segmentation count of each
            object. LoD[2] represents the number of polygons of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type
float32. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
    This operation proposes RoIs according to each box's probability of being a
    foreground object; the boxes are computed from the anchors. bbox_deltas and
    scores, the outputs of the RPN, give the box refinements and objectness
    scores. The final proposals can be used to train the detection net.
    For generating proposals, this operation performs the following steps:
    1. Transpose and reshape scores and bbox_deltas to shapes
       (H*W*A, 1) and (H*W*A, 4)
2. Calculate box locations as proposals candidates.
3. Clip boxes to image
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
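    A minimal NumPy sketch of step 1 for a single image (illustrative only;
    shapes follow the example below):

    .. code-block:: python

        import numpy as np
        N, A, H, W = 1, 4, 5, 5
        scores = np.random.rand(N, A, H, W).astype('float32')
        bbox_deltas = np.random.rand(N, 4 * A, H, W).astype('float32')
        scores_2d = scores.transpose(0, 2, 3, 1).reshape(-1, 1)       # (H*W*A, 1)
        deltas_2d = bbox_deltas.transpose(0, 2, 3, 1).reshape(-1, 4)  # (H*W*A, 4)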
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing
            the number of RoIs of each image in the batch, where N is the number of images. For example,
            the values [4, 5] mean the first image has 4 RoIs and the second image has 5 RoIs.
            It is only used in the RCNN model. 'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
    For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
            im_w = round(width / scale)
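    A NumPy sketch of the clipping formulas above for a single box (assumed
    values, for illustration only):

    .. code-block:: python

        import numpy as np
        im_h, im_w = 600., 800.
        box = np.array([-10., 20., 850., 700.])       # [xmin, ymin, xmax, ymax]
        box[0::2] = np.clip(box[0::2], 0., im_w - 1)  # clip x coordinates
        box[1::2] = np.clip(box[1::2], 0., im_h - 1)  # clip y coordinates
        # box is now [0., 20., 799., 599.]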
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
    **Notice**: In some cases where the image sizes are very small, it is possible
    that there is no detection if :attr:`score_threshold` is applied at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. The last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to come from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
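# A minimal sketch (not part of the Fluid API) of the adaptive ``nms_eta`` rule
# documented above: every time a box is suppressed, ``nms_threshold`` is
# multiplied by ``nms_eta`` until it drops to 0.5 or below. The function name
# and the numbers below are hypothetical and for illustration only.
def _adaptive_nms_threshold_demo(nms_threshold=0.7, nms_eta=0.9, num_suppressed=5):
    history = [nms_threshold]
    for _ in range(num_suppressed):
        if nms_eta < 1. and nms_threshold > 0.5:
            nms_threshold *= nms_eta
        history.append(nms_threshold)
    # e.g. 0.7 -> 0.63 -> 0.567 -> 0.5103 -> 0.45927 -> 0.45927 (no further decay)
    return history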
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes whose scores are larger than score_threshold, if this threshold is
provided, and then keeps the nms_top_k boxes with the largest confidence scores
if nms_top_k is larger than -1. Then this operator prunes away boxes that have a
high IoU (intersection over union) overlap with already selected boxes by
adaptive threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bounding boxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.scores = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8, 16, 24, 32] represents the
predicted locations of M bounding boxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4],
where M is the number of bounding boxes and C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS. It works only when the value is less than 1.0. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If no boxes are detected for any
image, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
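# A small NumPy sketch (illustration only, not the multiclass_nms kernel)
# reproducing the IoU arithmetic from the worked example in the docstring:
# box1 = (2, 3, 7, 5) and box2 = (3, 4, 8, 5) give IoU = 4 / 11 ~= 0.364,
# which exceeds the example's nms_threshold of 0.3.
def _iou_xyxy_demo():
    import numpy as np
    box1 = np.array([2.0, 3.0, 7.0, 5.0])  # xmin, ymin, xmax, ymax
    box2 = np.array([3.0, 4.0, 8.0, 5.0])
    inter_w = max(0.0, min(box1[2], box2[2]) - max(box1[0], box2[0]))
    inter_h = max(0.0, min(box1[3], box2[3]) - max(box1[1], box2[1]))
    inter = inter_w * inter_h                           # 4.0
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])   # 10.0
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])   # 5.0
    return inter / (area1 + area2 - inter)               # 4 / 11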
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality-Aware NMS**
`Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
First, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, this operator greedily selects a
subset of detection bounding boxes whose scores are larger than score_threshold,
if this threshold is provided, and then keeps the nms_top_k boxes with the largest
confidence scores if nms_top_k is larger than -1. Then this operator prunes away
boxes that have a high IoU overlap with already selected boxes by adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bounding boxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8, 16, 24, 32]
represents the predicted locations of M bounding
boxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Currently only one class is supported. For each category
there are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The parameter for adaptive NMS. It works only when the value is less than 1.0. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If no boxes are detected for any
image, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
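# A simplified NumPy sketch (illustration only, not the locality_aware_nms
# kernel) of the merge idea behind locality-aware NMS: two strongly overlapping
# boxes are fused by score-weighted averaging of their coordinates before the
# usual NMS runs; in the original LANMS formulation the scores are accumulated.
# The helper name and the numbers below are hypothetical.
def _weighted_merge_demo():
    import numpy as np
    box_a, score_a = np.array([2.0, 3.0, 7.0, 5.0]), 0.9
    box_b, score_b = np.array([2.2, 3.1, 7.1, 5.2]), 0.6
    merged_box = (score_a * box_a + score_b * box_b) / (score_a + score_b)
    merged_score = score_a + score_b
    return merged_box, merged_score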
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then the top k candidates are kept if
nms_top_k is larger than -1. Scores of the remaining candidates are then
decayed according to the Matrix NMS scheme.
After the NMS step, at most keep_top_k bounding boxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are M scores in total, corresponding to the M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
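# A tiny NumPy sketch (illustration only, not the matrix_nms kernel) of the two
# pairwise decay functions used by the Matrix NMS scheme mentioned above: a
# linear decay, and a Gaussian decay controlled by gaussian_sigma when
# use_gaussian=True. In the full scheme, the decay applied to a box combines
# such factors over all higher-scored overlapping boxes; the decayed score is
# then compared against post_threshold. The values below are made up.
def _matrix_nms_decay_demo(iou=0.6, score=0.8, gaussian_sigma=2.0):
    import numpy as np
    linear_decay = 1.0 - iou
    gaussian_decay = np.exp(-(iou ** 2) / gaussian_sigma)
    return score * linear_decay, score * gaussian_decay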
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of rois in current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::

    roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

    level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it and it is
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
and data type float32 or float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
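# A short NumPy sketch (illustration only) of the level-assignment formula in
# the docstring above, assuming a base-2 logarithm as in the FPN paper; the RoI
# coordinates, the clamping to [min_level, max_level] and the helper name are
# assumptions for this sketch. A RoI whose sqrt(area) equals refer_scale lands
# on refer_level, and larger RoIs go to higher levels.
def _fpn_level_demo(roi=(0.0, 0.0, 448.0, 448.0),
                    min_level=2, max_level=5, refer_level=4, refer_scale=224):
    import numpy as np
    xmin, ymin, xmax, ymax = roi
    roi_scale = np.sqrt((xmax - xmin) * (ymax - ymin))      # sqrt(BBoxArea) = 448
    level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
    return int(np.clip(level, min_level, max_level))        # -> 5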
# MASKED: box_decoder_and_assign function (lines 3742-3815)
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it and it is
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
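# A compact NumPy sketch (illustration only, not the collect_fpn_proposals
# kernel) of steps 2-4 above: concatenate multi-level RoIs and scores, sort by
# score and keep the post_nms_top_n best RoIs. Re-sorting by batch id (step 5)
# is omitted for brevity; all arrays here are invented.
def _collect_topn_demo(post_nms_top_n=2):
    import numpy as np
    multi_rois = [np.random.rand(3, 4), np.random.rand(2, 4)]   # two FPN levels
    multi_scores = [np.random.rand(3), np.random.rand(2)]
    rois = np.concatenate(multi_rois, axis=0)
    scores = np.concatenate(multi_scores, axis=0)
    keep = np.argsort(-scores)[:post_nms_top_n]                  # highest scores first
    return rois[keep]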
|
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set it and it is
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
| 3,742 | 3,815 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as follows:
1. Anchors are assigned to ground-truth boxes when they have the highest IoU
overlap with a ground-truth box.
2. Anchors are assigned to ground-truth boxes when they have an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. Anchors are assigned to background when their IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
RetinaNet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive (or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
are a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries are set to 0; its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative samples, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is a fake positive; if a positive
sample is a fake positive, the corresponding entries in
:attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
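# A tiny NumPy sketch (illustration only, not part of this OP) of the
# target-label construction described in the docstring above: a positive sample
# assigned to ground-truth class i gets a length-C one-hot vector, while a
# negative sample gets all zeros. C and the class index below are made up;
# gt_labels values are 1-based, as stated in the docstring.
def _retinanet_label_vector_demo(num_classes=10, gt_class=3):
    import numpy as np
    positive_target = np.zeros(num_classes, dtype='float32')
    positive_target[gt_class - 1] = 1.0       # class i -> entry i set to 1
    negative_target = np.zeros(num_classes, dtype='float32')
    return positive_target, negative_target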
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and ground-truth
boxes, this layer assigns classification and regression targets to each anchor;
these targets are used to train the RPN. The classification target is a binary
class label (of being an object or not). Following the Faster-RCNN paper, the
positive labels are two kinds of anchors: (i) the anchor/anchors with the highest
IoU overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap (0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. A negative anchor is one whose IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
foreground and background sigmoid, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether the ground-truth box is a crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
anchors, the F and B is depends on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
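# A small NumPy sketch (illustration only, not the rpn_target_assign kernel) of
# the labeling rule described above: given an anchor-vs-gt IoU matrix, anchors
# are positive if their best IoU exceeds rpn_positive_overlap or if they are
# the best anchor for some ground-truth box, negative below
# rpn_negative_overlap, and -1 (ignored) otherwise. The IoU values are invented.
def _rpn_anchor_labels_demo(pos_thresh=0.7, neg_thresh=0.3):
    import numpy as np
    iou = np.array([[0.80, 0.10],    # anchor 0 vs gt 0 / gt 1
                    [0.20, 0.05],
                    [0.50, 0.65]])
    labels = np.full(iou.shape[0], -1, dtype='int64')
    best_per_anchor = iou.max(axis=1)
    labels[best_per_anchor < neg_thresh] = 0       # background
    labels[best_per_anchor >= pos_thresh] = 1      # rule (ii)
    labels[iou.argmax(axis=0)] = 1                 # rule (i): best anchor per gt
    return labels                                  # -> [1, 0, 1]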
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance existed on the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as followed:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
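# A NumPy sketch (illustration only, not the sigmoid_focal_loss kernel) of the
# per-element formula above for a single sample: column j takes the "positive"
# branch when j + 1 equals the sample's label, otherwise the "negative" branch.
# All inputs below are invented.
def _focal_loss_demo(gamma=2.0, alpha=0.25):
    import numpy as np
    x = np.array([1.2, -0.3, 0.5])   # logits for C = 3 classes
    label = 2                        # foreground class in [1, C]; 0 means background
    fg_num = 1.0
    p = 1.0 / (1.0 + np.exp(-x))     # per-class sigmoid
    pos = -alpha * (1 - p) ** gamma * np.log(p) / fg_num
    neg = -(1 - alpha) * p ** gamma * np.log(1 - p) / fg_num
    j = np.arange(x.size)
    return np.where(j + 1 == label, pos, neg)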
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in first dimension are called LoD, the offset number is
N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results, if it is 0, the i-th image has no detected results.
Index (Variable): Only return when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected index, whose type is integer.
The index is the absolute value across batches. No is the same number
as Out. If the index is used to gather other attributes such as age,
one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': background_label,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': nms_eta,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
box_normalized(bool): Whether treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
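# A quick plain-Python check (illustration only) of the value in the example
# above: x = [0.5, 0.5, 2.0, 2.0] against y = [1.0, 1.0, 2.5, 2.5] gives
# IoU = 1.0 / 3.5 ~= 0.2857143. The helper name is hypothetical.
def _iou_similarity_check():
    inter = (min(2.0, 2.5) - max(0.5, 1.0)) * (min(2.0, 2.5) - max(0.5, 1.0))  # 1.0
    area_x = (2.0 - 0.5) * (2.0 - 0.5)   # 2.25
    area_y = (2.5 - 1.0) * (2.5 - 1.0)   # 2.25
    return inter / (area_x + area_y - inter)   # 1.0 / 3.5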
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(|tw / pw|) / pwv
oh = \log(|th / ph|) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. The first is a Variable with shape [M, 4] which holds M groups of
variances and whose data type is float32 or float64. The second is a list of
4 elements shared by all boxes, with data type float32 or float64. The third
is None, in which case the variance is not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether to treat the prior box as a normalized box.
    Set True by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
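# A rough NumPy sketch of the encode_center_size math above (illustrative
# only, not the operator itself; tx/ty/tw/th and px/py/pw/ph are assumed to
# already be center-size values as defined in the formulas):
#
#   import numpy as np
#   px, py, pw, ph = 0.5, 0.5, 1.0, 1.0      # prior box center / size
#   pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2  # prior box variance
#   tx, ty, tw, th = 0.6, 0.6, 0.8, 0.8      # target box center / size
#   ox = (tx - px) / pw / pxv                # -> 1.0
#   oy = (ty - py) / ph / pyv                # -> 1.0
#   ow = np.log(abs(tw / pw)) / pwv          # -> log(0.8) / 0.2
#   oh = np.log(abs(th / ph)) / phv          # -> log(0.8) / 0.2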
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
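# out has the same shape as the input, i.e. [4, 10, 5, 5] here
# (see the Returns section above).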
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x,y is the center coordinate of boxes, w, h are the
width and height, x, y, w, h should be divided by
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image.The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
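# A rough shape note (illustrative, following the usual YOLOv3 layout):
# anchor_mask=[0, 1, 2] selects 3 anchor pairs from `anchors`, so the 255
# input channels correspond to 3 * (5 + class_num) = 3 * (5 + 80), and the
# returned loss is a 1-D tensor of shape [N] as described in Returns above.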
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
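# A rough shape note (assuming the usual YOLOv3 head layout): the 6 anchor
# values above describe 3 (w, h) anchor pairs, so the 255 input channels
# correspond to 3 * (5 + class_num) = 3 * 85, and for a 13 x 13 feature map
# boxes is [N, 3*13*13, 4] = [N, 507, 4] and scores is [N, 507, 80],
# matching the Returns section above.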
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
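# map_out holds the resulting mean average precision (mAP) computed over
# the 21 classes with the default 'integral' AP version (a rough usage note).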
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity is not matched to
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
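>>> # Per the Returns section above, matched_indices and matched_dist are
>>> # both [N, M] tensors; an entry of -1 in matched_indices marks a column
>>> # entity that was not matched to any row entity.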
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to the predictions. The weights are used to specify which predictions
do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is a 2-D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
    as `input`. N and P are the same as they are in `matched_indices`,
    and K is the same as it is in the input X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
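# A rough shape note based on the Returns section above: with x of shape
# [4, 20, 4] (lod_level=1) and matched_id of shape [8, 20], trg has shape
# [8, 20, 4] and trg_weight has shape [8, 20, 1].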
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative'. Currently only `max_negative` is supported.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example'; currently only the \
    mining type of `max_negative` is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
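# The returned loss is the weighted sum of the confidence loss and the
# localization loss, with shape [N * Np, 1] as described in Returns above.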
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box is within the range (min_size, max_size), and the boxes are generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
    steps[0] equals 0.0 or steps[1] equals 0.0, the prior box steps across
    the height or width of the input will be automatically calculated.
    Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
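# Why num_priors is 1 here (a rough note): with min_sizes=[100.], the default
# aspect_ratios=[1.] (flipping 1. adds no new ratio) and no max_sizes, each
# position of the 6 x 9 input produces a single prior box, giving the
# [6, 9, 1, 4] shapes printed above.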
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should be a list or tuple of the same
length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes. If this attribute is not set while :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
    steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box steps across
    the height or width of the input will be automatically calculated.
    Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
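# Why num_priors is 21 here (a rough note, using the formula above):
# sum over densities of len(fixed_ratios) * density^2
#   = 1 * (4^2 + 2^2 + 1^2) = 21,
# and in the flattened declarative example 6 * 9 * 21 = 1134 rows.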
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression location and classification confidence on multiple input feature
maps, then output the concatenated results. For details of this algorithm,
please refer to Section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
    and `min_size` and `max_size` are None, the `min_size` and `max_size`
    are calculated by `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <=2`,
min_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <=2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default: 0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
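# A rough worked example of the min/max size formula in the Args section:
# with 6 inputs, base_size=300, min_ratio=20 and max_ratio=90,
# step = floor((90 - 20) / (6 - 2)) = 17 and ratios = 20, 37, 54, 71, 88, so
# min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and
# max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].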
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The order of generated anchors
is: first loop over aspect_ratios, then loop over anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
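# A rough shape note: num_anchors = len(anchor_sizes) * len(aspect_ratios)
# = 4 * 3 = 12, so for the 16 x 16 feature map above `anchor` has layout
# [16, 16, 12, 4] and `var` has the same layout, per the Returns section.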
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each ROI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
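# A rough shape note based on the Returns section above: out is
# (num_rois, 256, 7, 7), mask is (num_rois, 1, 7, 7) and transform_matrix
# is (num_rois, 9) for this example.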
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the bounding boxes produced by GenerateProposalOp and the ground-truth, this operator
samples foreground and background boxes and computes the loss targets.
RpnRois are the output boxes of the RPN processed by generate_proposal_op; these boxes
are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
An instance whose overlap with the ground-truth is greater than fg_thresh is considered a foreground sample.
An instance whose overlap with the ground-truth is greater than bg_thresh_lo and lower than bg_thresh_hi
is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in the RoIs, the classification target (class label) and regression targets (box label) are assigned to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per image. The data type must be int32.
fg_fraction(float): Foreground fraction of the total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): If True, bbox regression is class agnostic, i.e. it only distinguishes fg and bg boxes.
is_cascade_rcnn(bool): If True, some bboxes crossing the image's boundary will be filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
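# A simplified NumPy sketch of the foreground/background sampling rule described
# above. It assumes we already have `max_overlaps`, the IoU of each RoI with its
# best-matching ground-truth box, and reuses this function's default thresholds.
# Only the thresholding and subsampling logic is mirrored here; the real operator
# additionally builds the regression targets and the inside/outside weights.
import numpy as np
rng = np.random.RandomState(0)
max_overlaps = rng.uniform(0.0, 1.0, size=512)        # hypothetical IoUs
batch_size_per_im, fg_fraction = 256, 0.25
fg_thresh, bg_thresh_hi, bg_thresh_lo = 0.25, 0.5, 0.0
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
bg_inds = np.where((max_overlaps < bg_thresh_hi) &
                   (max_overlaps >= bg_thresh_lo))[0]
bg_inds = np.setdiff1d(bg_inds, fg_inds)              # treat each RoI as either fg or bg
# Keep at most batch_size_per_im * fg_fraction foreground RoIs and fill the rest
# of the batch with background RoIs (random subsampling, as with use_random=True).
fg_num = min(int(batch_size_per_im * fg_fraction), len(fg_inds))
bg_num = min(batch_size_per_im - fg_num, len(bg_inds))
keep = np.concatenate([rng.choice(fg_inds, fg_num, replace=False),
                       rng.choice(bg_inds, bg_num, replace=False)])
print(fg_num, bg_num, keep.shape)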
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. The mask branch also has
a :math:`K \\times M^{2}` dimensional output target for each foreground
RoI, which encodes K binary masks of resolution M x M, one for each of the
K classes. These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation, assuming the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type; its LoD level is 3.
Usually users do not need to understand LoD;
they should return the correct data format in the reader.
LoD[0] represents the number of ground-truth objects of
each instance. LoD[1] represents the segmentation count of each
object. LoD[2] represents the number of polygons of each segmentation.
S is the total number of polygon coordinate points. Each element is
an (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
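# A small NumPy illustration of the mask target layout described above: each row
# of mask_int32 packs K binary masks of resolution M x M, so it can be reshaped
# to [P, K, M, M] and indexed by the RoI's class label. The values are made up.
import numpy as np
P, K, M = 4, 81, 14                        # sampled RoIs, classes, resolution
mask_targets = np.zeros((P, K * M * M), dtype=np.int32)
roi_labels_np = np.array([1, 5, 5, 17])    # hypothetical class label per RoI
per_class = mask_targets.reshape(P, K, M, M)
roi0_target = per_class[0, roi_labels_np[0]]   # the 14 x 14 mask target for RoI 0
print(per_class.shape, roi0_target.shape)  # (4, 81, 14, 14) (14, 14)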
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box's
probability of being a foreground object; the boxes
are computed from the anchors. The bbox_deltas and the objectness
scores are the outputs of the RPN. The final proposals
can be used to train the detection network.
To generate proposals, this operation performs the following steps:
1. Transposes and reshapes scores and bbox_deltas to
(H*W*A, 1) and (H*W*A, 4) respectively.
2. Calculates box locations as proposal candidates (a decoding sketch follows this function).
3. Clips boxes to the image.
4. Removes predicted boxes with small area.
5. Applies NMS to get the final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor representing the anchors with a layout
of [H, W, A, 4]. H and W are the height and width of the feature map,
A is the number of anchors at each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Used in adaptive NMS; if the adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing the
number of RoIs of each image in the batch, where N is the number of images. For example, the values [4, 5] mean
the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
`False` by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
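# A hedged NumPy sketch of step 2 above (turning anchors plus bbox_deltas into
# proposal candidates), using the common Faster R-CNN-style box decoding. The
# exact variance handling and the +1 pixel convention inside the operator may
# differ; treat this as an illustration of the idea, not the operator's kernel.
import numpy as np
def decode_boxes(anchors, deltas, variances):
    # anchors, deltas, variances: [M, 4] arrays; anchors are (xmin, ymin, xmax, ymax).
    aw = anchors[:, 2] - anchors[:, 0] + 1.0
    ah = anchors[:, 3] - anchors[:, 1] + 1.0
    acx = anchors[:, 0] + 0.5 * aw
    acy = anchors[:, 1] + 0.5 * ah
    dx, dy, dw, dh = (deltas * variances).T
    cx = dx * aw + acx
    cy = dy * ah + acy
    w = np.exp(dw) * aw
    h = np.exp(dh) * ah
    return np.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], axis=1)
anchors_np = np.array([[0., 0., 15., 15.]])            # one 16x16 anchor, made up
deltas_np = np.array([[0.1, -0.1, 0.2, 0.0]])
variances_np = np.ones((1, 4))
print(decode_boxes(anchors_np, deltas_np, variances_np))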
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes into the size given by im_info.
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
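# A tiny sketch of the clipping formula quoted in the docstring above. The sample
# box and im_info values are made up for illustration.
def clip_box(box, im_info):
    height, width, scale = im_info
    im_h, im_w = round(height / scale), round(width / scale)
    xmin, ymin, xmax, ymax = box
    return [max(min(xmin, im_w - 1), 0), max(min(ymin, im_h - 1), 0),
            max(min(xmax, im_w - 1), 0), max(min(ymax, im_h - 1), 0)]
print(clip_box([-5.0, 10.0, 700.0, 300.0], im_info=[608.0, 608.0, 1.0]))
# -> [0, 10.0, 607, 300.0]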
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions. This OP obtains the detection results by
performing the following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it's possible
that there is no detection if :attr:`score_threshold` is used at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
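# A hedged NumPy sketch of step 1 above for a single FPN level: keep predictions
# whose score exceeds score_threshold, then retain at most nms_top_k of them by
# confidence before the boxes are decoded and merged across levels. The scores
# array is a made-up example; this is not the operator's kernel.
import numpy as np
rng = np.random.RandomState(0)
level_scores = rng.uniform(0.0, 1.0, size=5000)        # flattened anchor-by-class scores
score_threshold, nms_top_k = 0.05, 1000
keep = np.where(level_scores > score_threshold)[0]
if len(keep) > nms_top_k:
    # argpartition picks the nms_top_k highest scores without a full sort.
    keep = keep[np.argpartition(level_scores[keep], -nms_top_k)[-nms_top_k:]]
print(keep.shape)                                      # at most (1000,)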
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator performs multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes that have scores larger than score_threshold, if this threshold is
provided, and then keeps the nms_top_k highest-confidence boxes if nms_top_k
is larger than -1. Then this operator prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes by adaptive
threshold NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8, 16, 24, 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4].
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4]. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
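# A small NumPy check of the IoU arithmetic used in the docstring example above
# (box sizes computed without the +1 pixel convention): box1 = (2, 3, 7, 5) and
# box2 = (3, 4, 8, 5) overlap with IoU = 4/11.
import numpy as np
def iou_xyxy(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)
box1 = np.array([2.0, 3.0, 7.0, 5.0])
box2 = np.array([3.0, 4.0, 8.0, 5.0])
assert np.isclose(iou_xyxy(box1, box2), 4.0 / 11.0)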
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Locality Aware NMS**
`Locality Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
Firstly, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, this operator greedily selects a
subset of detection bounding boxes that have scores larger than score_threshold,
if this threshold is provided, and then keeps the nms_top_k highest-confidence boxes
if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
IoU overlap with already selected boxes by adaptive threshold NMS based on the parameters
nms_threshold and nms_eta.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8, 16, 24, 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
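# A hedged sketch of the locality-aware merging idea mentioned above, following
# the weighted merge rule from the EAST paper: two nearby boxes are fused into a
# score-weighted average and their scores are summed before standard NMS runs.
# The operator's exact merge rule may differ; the values below are made up.
import numpy as np
def weighted_merge(box_a, score_a, box_b, score_b):
    merged = (score_a * box_a + score_b * box_b) / (score_a + score_b)
    return merged, score_a + score_b
box_a = np.array([10.0, 10.0, 50.0, 30.0])
box_b = np.array([12.0, 11.0, 52.0, 31.0])
merged_box, merged_score = weighted_merge(box_a, 0.9, box_b, 0.3)
print(merged_box, merged_score)   # the merged box is pulled toward the higher-scored one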
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
It first selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then the top nms_top_k candidates are kept if
nms_top_k is larger than -1. The scores of the remaining candidates are then
decayed according to the Matrix NMS scheme.
After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
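# A hedged NumPy sketch of the score decay mentioned above, following the Matrix
# NMS formulation published with SOLOv2 (Gaussian variant). The operator's kernel
# may parameterize gaussian_sigma slightly differently; treat this as an
# illustration. `ious_demo` is a made-up IoU matrix of candidates sorted by score.
import numpy as np
def matrix_nms_decay(ious, sigma=2.0):
    # ious[i, j]: IoU between candidates i and j, sorted by descending score.
    # Only pairs with i < j (a higher-scored box suppressing a lower-scored one)
    # take part in the decay.
    n = ious.shape[0]
    upper = np.triu(ious, k=1)
    # For each candidate, the largest IoU it has with any higher-scored candidate.
    compensate = upper.max(axis=0)
    decay = np.exp(-sigma * upper ** 2) / np.exp(-sigma * compensate[:, None] ** 2)
    masked = np.where(np.triu(np.ones((n, n), dtype=bool), k=1), decay, np.inf)
    return masked.min(axis=0).clip(max=1.0)
ious_demo = np.array([[1.0, 0.8, 0.1],
                      [0.8, 1.0, 0.2],
                      [0.1, 0.2, 1.0]])
print(matrix_nms_decay(ious_demo))   # the heavily-overlapping second candidate is decayed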
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed to the different FPN
levels according to the scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of the rois in the current proposals.
To compute the FPN level for each roi, the formula is given as follows:
.. math::
roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\
level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
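# A hedged NumPy sketch of the level-assignment formula above, using a base-2
# logarithm as in the FPN paper (the base is an assumption here; the docstring
# writes a plain log). Boxes are (xmin, ymin, xmax, ymax); the values are made up.
import numpy as np
def assign_fpn_level(rois, min_level=2, max_level=5, refer_level=4, refer_scale=224):
    w = rois[:, 2] - rois[:, 0]
    h = rois[:, 3] - rois[:, 1]
    roi_scale = np.sqrt(w * h)
    level = np.floor(np.log2(roi_scale / refer_scale + 1e-6) + refer_level)
    return np.clip(level, min_level, max_level).astype(np.int64)
demo_rois = np.array([[0., 0., 56., 56.],      # small box      -> low level
                      [0., 0., 224., 224.],    # reference size -> refer_level
                      [0., 0., 900., 900.]])   # large box      -> high level
print(assign_fpn_level(demo_rois))             # [2 4 5]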
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
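# A hedged NumPy sketch of steps 2-4 above for a single image: concatenate the
# per-level RoIs and scores, pick the post_nms_top_n highest scores and gather
# the corresponding RoIs. The multi-image re-sorting by batch id (step 5) relies
# on LoD information and is omitted here; all values are made up.
import numpy as np
rng = np.random.RandomState(0)
demo_rois = [rng.uniform(0, 100, size=(n, 4)) for n in (30, 20, 10)]    # 3 FPN levels
demo_scores = [rng.uniform(0, 1, size=(n, 1)) for n in (30, 20, 10)]
post_nms_top_n = 16
all_rois = np.concatenate(demo_rois, axis=0)
all_scores = np.concatenate(demo_scores, axis=0).ravel()
top = np.argsort(-all_scores)[:post_nms_top_n]
print(all_rois[top].shape)   # (16, 4)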
|
get_node_list
|
This function returns all check constraint
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents the Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check Constraint nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check Constraint nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
The class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check_constraint.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint
object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
# MASKED: get_node_list function (lines 267-305)
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all event check constraint as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraints property.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
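# No constraint name supplied: run the CREATE inside an explicit
# transaction so the server-assigned name can be read back before
# the transaction is ended.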
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
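# Existing constraint: diff against its current properties and build ALTER SQL from update.sql.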
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
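# New constraint: require a definition (consrc) and build CREATE SQL from create.sql.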
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
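# validate.sql is expected to mark the constraint as validated on the
# server (an ALTER TABLE ... VALIDATE CONSTRAINT in PostgreSQL).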
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
|
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
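# properties.sql (rendered with only tid) lists every check constraint
# defined on the table; the raw rows are returned for the list view.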
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
| 267 | 305 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
This class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
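# nodes.sql exposes a 'convalidated' flag; when set, the constraint is
# treated as not yet validated and shown with the 'bad' icon.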
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraints property.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
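# Fetch the server-assigned constraint name while the transaction
# opened above is still active.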
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
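# Load the constraint's current definition first; delete.sql builds the
# DROP statement from this row (constraint, table and schema names).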
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
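# Prepend a header naming the constraint plus the matching DROP
# statement rendered behind a comment prefix.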
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
|
node
|
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
This class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
# MASKED: node function (lines 307-344)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraints property.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
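# get_sql() returns the ALTER statement; when the first value is falsy,
# the second carries the error response to send back.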
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
|
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
| 307 | 344 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
This class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
        Returns the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
        This function returns all check constraints as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
        Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
            tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
        This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
            tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
        This function gets the dependents and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
        This function gets the dependencies and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
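The check_precondition decorator defined above is what makes each route method usable on its own: it attaches the connection, the version-specific template path, and the parent schema/table to the view instance before the wrapped method runs. A minimal, self-contained sketch of that pattern, assuming a hypothetical FakeConnection and attach_context in place of pgAdmin's driver objects:

from functools import wraps

class FakeConnection(object):
    # Stand-in for the driver connection; pretends the get_parent.sql lookup succeeded.
    def execute_2darray(self, sql):
        return True, {'rows': [{'schema': 'public', 'table': 'orders'}]}

def attach_context(f):
    # Mirrors check_precondition: gather per-request state, store it on self,
    # then invoke the wrapped view method.
    @wraps(f)
    def wrap(self, *args, **kwargs):
        self.conn = FakeConnection()
        self.template_path = 'check_constraint/sql/#90600#'  # hypothetical version tag
        status, rset = self.conn.execute_2darray("-- rendered get_parent.sql")
        if not status:
            raise RuntimeError(rset)
        self.schema = rset['rows'][0]['schema']
        self.table = rset['rows'][0]['table']
        return f(self, *args, **kwargs)
    return wrap

class DemoView(object):
    @attach_context
    def list(self, tid):
        return self.schema, self.table

print(DemoView().list(tid=1))  # -> ('public', 'orders')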
|
nodes
|
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
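The implementation recorded later in this entry keys the browser icon and validity flag off the convalidated column. A standalone sketch of that mapping, with the column name assumed from the nodes.sql template:

def icon_for(row):
    # A truthy convalidated value gets the "bad" icon and valid=False,
    # mirroring the checks in node() and nodes().
    if row.get("convalidated"):
        return "icon-check_constraints_bad", False
    return "icon-check_constraints", True

print(icon_for({"convalidated": True}))  # ('icon-check_constraints_bad', False)
print(icon_for({}))                      # ('icon-check_constraints', True)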
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
    This class represents the Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
        Check nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
        Check nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
    The class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
      - Generates the SQL statements to create/update the Check Constraint
object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
            cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
        Returns the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
# MASKED: nodes function (lines 346-383)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
        This function returns all check constraints as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
        Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
            tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
        This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
            tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
        This function gets the dependents and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
        This function gets the dependencies and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
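get_sql() above branches on whether a cid is supplied: with a cid it merges the request data into the existing row and renders update.sql, otherwise it requires consrc and renders create.sql. A rough sketch of that branching with plain SQL strings standing in for the Jinja templates; the generated statements below are illustrative, not the templates' actual output:

def build_check_constraint_sql(data, old_data=None):
    if old_data is not None:
        # Update path: a cid was supplied, so old_data holds the current row.
        name = data.get('name', old_data['name'])
        sql = ('ALTER TABLE {0}.{1} RENAME CONSTRAINT {2} TO {3};'
               .format(data['schema'], data['table'], old_data['name'], name))
        return sql, name
    # Create path: a CHECK expression (consrc) is mandatory.
    if not data.get('consrc'):
        return None, '-- definition incomplete'
    name = data.get('name')
    if name:
        sql = ('ALTER TABLE {0}.{1} ADD CONSTRAINT {2} CHECK ({3});'
               .format(data['schema'], data['table'], name, data['consrc']))
    else:
        # Unnamed constraint: let the server assign a name (see create() above).
        sql = ('ALTER TABLE {0}.{1} ADD CHECK ({2});'
               .format(data['schema'], data['table'], data['consrc']))
    return sql, name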
|
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
| 346 | 383 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
    This class represents the Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
        Check nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
        Check nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
    The class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
      - Generates the SQL statements to create/update the Check Constraint
object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
            cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
        Returns the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
        This function returns all check constraints as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
        Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
            tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
        This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
            tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
        This function gets the dependents and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
        This function gets the dependencies and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
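When no constraint name is supplied, create() above wraps the DDL in an explicit transaction, apparently so it can read the server-assigned name back before committing. A hedged sketch of that flow; FakeConn and the two callables are hypothetical stand-ins for the execute_* calls against the rendered create.sql and get_oid_with_transaction.sql templates:

class FakeConn(object):
    # Minimal stand-in that records the statements it is asked to run.
    def __init__(self):
        self.statements = []
    def execute_scalar(self, sql):
        self.statements.append(sql)
        return True, None

def create_unnamed_constraint(conn, run_create_ddl, fetch_generated_name):
    conn.execute_scalar("BEGIN;")
    try:
        run_create_ddl()               # rendered create.sql, no name given
        return fetch_generated_name()  # rendered get_oid_with_transaction.sql
    finally:
        conn.execute_scalar("END;")    # mirrors end_transaction()

conn = FakeConn()
name = create_unnamed_constraint(conn,
                                 run_create_ddl=lambda: None,
                                 fetch_generated_name=lambda: "orders_check")
print(name, conn.statements)  # orders_check ['BEGIN;', 'END;']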
|
get_nodes
|
        This function returns all check constraints as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
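Unlike nodes(), get_nodes() returns a plain Python list rather than an HTTP response, presumably so a parent module (for example the table node) can fold the child entries into its own payload. A hypothetical illustration of such a caller; the view objects and ids are placeholders:

def children_of_table(constraint_views, gid, sid, did, scid, tid):
    # Each view is expected to expose a get_nodes() returning a list of nodes.
    children = []
    for view in constraint_views:
        children.extend(view.get_nodes(gid, sid, did, scid, tid))
    return children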
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
    This class represents the Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
        Check nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
        Check nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
    The class is responsible for the Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
      - Generates the SQL statements to create/update the Check Constraint
object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
# MASKED: get_nodes function (lines 385-439)
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
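# Descriptive note on get_sql()'s return contract (added commentary, not
# original code): on success it returns a (SQL, name) tuple; on its error
# paths it returns either a (False, <error response>) pair or, for an
# incomplete definition, a bare translated string. Callers such as update()
# and msql() unpack the result with "SQL, name = self.get_sql(...)", so the
# bare-string case surfaces as an unpacking error that their surrounding
# try/except converts into an internal_server_error response.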
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
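# Illustrative sketch (added for this write-up; the bundled validate.sql
# template is not reproduced in this listing): for a PostgreSQL check
# constraint, the validation step performed by validate_check_constraint()
# is expected to reduce to DDL of roughly this shape, with placeholder
# identifiers here and quoting handled by qtIdent in the real template:
#
#   ALTER TABLE "public"."my_table"
#       VALIDATE CONSTRAINT "my_check";
#
# The schema, table and constraint name fed into that template are the ones
# collected via get_parent.sql and get_name.sql in the method above.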
|
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
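# Usage sketch (hypothetical ids, not part of the original module): pgAdmin
# instantiates the view itself, but conceptually
#
#   nodes = view.get_nodes(gid=1, sid=1, did=12345, scid=2200, tid=16386)
#
# yields a plain Python list with one browser-node entry per check constraint
# on the table; entries whose row reports a truthy "convalidated" flag get
# the "icon-check_constraints_bad" icon, presumably so that not-yet-validated
# constraints stand out in the browser tree.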
| 385 | 439 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint when any of the
Check nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint when any of the
Check nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
The class is responsible for Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint
object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
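# Illustrative sketch (added commentary; create.sql itself is not shown in
# this listing): the rendered creation DDL for a check constraint is expected
# to be an ALTER TABLE of roughly this form, with identifiers quoted through
# qtIdent and NOT VALID appended only when requested:
#
#   ALTER TABLE "public"."my_table"
#       ADD CONSTRAINT "my_check" CHECK (price > 0);
#
# When the caller supplies no constraint name, create() wraps this DDL in an
# explicit BEGIN ... END block and reads the server-generated name back via
# get_oid_with_transaction.sql before ending the transaction.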
|
properties
|
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint when any of the
Check nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint when any of the
Check nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
The class is responsible for Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint
object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
# MASKED: properties function (lines 441-470)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns the ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
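# Illustrative catalog sketch (added commentary; the versioned nodes.sql
# template lives under check_constraint/sql/#<version># and is not included
# in this listing): a minimal query returning the columns this view consumes
# (oid, name, convalidated) could look like the following, with the flag
# inverted so that a truthy value marks a constraint that still needs
# validation, matching the icon logic above:
#
#   SELECT c.oid, c.conname AS name, NOT c.convalidated AS convalidated
#     FROM pg_catalog.pg_constraint c
#    WHERE c.conrelid = %(tid)s AND c.contype = 'c';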
|
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
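# Usage note (added commentary): properties() backs the pgAdmin properties
# panel for a single check constraint; the ajax_response body is simply the
# first row returned by properties.sql, so whatever columns that template
# selects (e.g. name, consrc, convalidated) reach the client verbatim as
# JSON keys.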
| 441 | 470 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint when any of the
Check nodes is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint when any of the
Check nodes is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
The class is responsible for Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=str(e)
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
The modified SQL statement as a JSON response.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
|
msql
|
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
The modified SQL statement as a JSON response.
|
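A note on the msql entry above, added for clarity: the method renders the SQL that would be generated for the requested change without executing it, and each query-string value is first tried as JSON before falling back to the raw string. The following standalone sketch mirrors only that argument-coercion step; it is illustrative, the helper name is made up, and it is not part of the pgAdmin API.
import json
def coerce_query_args(args):
    """Decode JSON-encoded query-string values, keeping raw strings otherwise."""
    data = {}
    for k, v in args.items():
        try:
            data[k] = json.loads(v)
        except (ValueError, TypeError):
            data[k] = v
    return data
# Example: coerce_query_args({'name': '"chk_price"', 'convalidated': 'true'})
# yields {'name': 'chk_price', 'convalidated': True}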
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
This class is responsible for Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check_constraint.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=str(e)
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
# MASKED: msql function (lines 738-776)
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
|
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
The modified SQL statement as a JSON response.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
| 738 | 776 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
This class is responsible for Create, Read, Update and Delete operations on
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check_constraint.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
- Generates the SQL statements to create/update the Check Constraint object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns the requested Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints of the table as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraint properties.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a check constraint.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=str(e)
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
The modified SQL statement as a JSON response.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependents and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function gets the dependencies and returns an ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
|
stack_load
|
This fixture is used to run `stack load` on the host during integration tests.
There are 4 essentially equivalent ways of loading and running a dump.json. Using
this test fixture ensures that all 4 are tested. I.E:
stack load dump_file exec=True
stack load document=dump_file exec=True
stack load dump_file | bash -x
stack load document=dump_file | bash -x
|
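The body of the stack_load fixture itself is not included in this excerpt. As a rough sketch only, assuming a subprocess-based invocation like the other fixtures in this conftest and a dump-file path supplied by the calling test, the four equivalent invocations listed above could be exercised with a parametrized fixture along these lines:
import subprocess
import pytest
@pytest.fixture(params=[
    'stack load {dump} exec=True',
    'stack load document={dump} exec=True',
    'stack load {dump} | bash -x',
    'stack load document={dump} | bash -x',
])
def stack_load(request):
    def _inner(dump_file):
        cmd = request.param.format(dump=dump_file)
        # shell=True because two of the forms pipe the output through bash
        result = subprocess.run(cmd, shell=True)
        if result.returncode != 0:
            pytest.fail(f'stack load failed: {cmd}')
    return _inner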
import json
import subprocess
import ipaddress
import pytest
@pytest.fixture
def add_host():
def _inner(hostname, rack, rank, appliance):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
# First use of the fixture adds backend-0-0
_inner('backend-0-0', '0', '0', 'backend')
# Then return the inner function, so we can call it inside the test
# to get more hosts added
return _inner
@pytest.fixture
def add_host_with_interface():
def _inner(hostname, rack, rank, appliance, interface):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack add host interface {hostname} interface={interface}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy interface')
_inner('backend-0-0', '0', '0', 'backend', 'eth0')
return _inner
@pytest.fixture
def add_ib_switch():
def _inner(hostname, rack, rank, appliance, make, model, sw_type):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
cmd = f'stack set host attr {hostname} attr=switch_type value={sw_type}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set switch type')
_inner('switch-0-0', '0', '0', 'switch', 'Mellanox', 'm7800', 'infiniband')
return _inner
@pytest.fixture
def add_ib_switch_partition():
def _inner(switch_name, partition_name, options):
cmd = f'stack add switch partition {switch_name} name={partition_name} '
if options is not None:
cmd += f'options={options}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy switch partition')
_inner('switch-0-0', 'Default', '')
return _inner
@pytest.fixture
def add_switch():
def _inner(hostname, rack, rank, appliance, make, model):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
_inner('switch-0-0', '0', '0', 'switch', 'fake', 'unrl')
return _inner
@pytest.fixture
def add_appliance(host):
def _inner(name):
result = host.run(f'stack add appliance {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy appliance "{name}"')
# First use of the fixture adds appliance "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more appliances added
return _inner
@pytest.fixture
def add_box(host):
def _inner(name):
result = host.run(f'stack add box {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy box "{name}"')
# First use of the fixture adds box "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more boxes added
return _inner
@pytest.fixture
def add_cart(host):
def _inner(name):
result = host.run(f'stack add cart {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy cart "{name}"')
# First use of the fixture adds cart "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more carts added
return _inner
@pytest.fixture
def add_environment(host):
def _inner(name):
result = host.run(f'stack add environment {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy environment "{name}"')
# First use of the fixture adds environment "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more environments added
return _inner
@pytest.fixture
def add_group(host):
def _inner(name):
result = host.run(f'stack add group {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy group "{name}"')
# First use of the fixture adds group "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more groups added
return _inner
@pytest.fixture
def add_network(host):
"""Adds a network to the stacki db. For historical reasons the first test network this creates is pxe=False."""
def _inner(name, address, pxe = False):
result = host.run(
f'stack add network {name} address={address} mask=255.255.255.0 pxe={pxe}'
)
if result.rc != 0:
pytest.fail(f'unable to add dummy network "{name}"')
# First use of the fixture adds network "test"
_inner('test', '192.168.0.0')
# Then return the inner function, so we can call it inside the test
# to get more networks added
return _inner
@pytest.fixture
def add_host_with_net(host, add_host_with_interface, add_network):
"""Adds a host with a network. The first network this adds defaults to pxe=True."""
def _inner(hostname, rack, rank, appliance, interface, ip, network, address, pxe):
# Add the host with an interface.
add_host_with_interface(hostname = hostname, rack = rack, rank = rank, appliance = appliance, interface = interface)
# Add the network.
add_network(name = network, address = address, pxe = pxe)
# Associate it to the interface.
result = host.run(f"stack set host interface network {hostname} network={network} interface={interface}")
assert result.rc == 0
# Set the interface IP
result = host.run(f"stack set host interface ip {hostname} ip={ip} network={network}")
assert result.rc == 0
# Add it to the frontend, because a lot of things in stacki expect backends to share networks with
# frontends.
result = host.run("stack list host interface a:frontend output-format=json")
assert result.rc == 0
# Try to figure out if the frontend has an interface on this network already.
interface_on_network = False
for frontend_interface in json.loads(result.stdout):
if frontend_interface["network"] == network:
interface_on_network = True
break
if interface_on_network:
return
# Need to add an interface to the frontend on this network. Make sure we choose the next latest
# interface name so we don't clash with other interface names.
latest_interface = max(frontend_interface["interface"] for frontend_interface in json.loads(result.stdout))
# This should be a string, so we tokenize it into characters
new_interface = list(latest_interface)
new_interface[-1] = str(int(new_interface[-1]) + 1)
new_interface = "".join(new_interface)
result = host.run(f"stack add host interface a:frontend interface={new_interface} network={network} ip={ipaddress.ip_address(ip) + 1}")
assert result.rc == 0
# First use of the add_host_with_interface fixture adds backend-0-0 with interface eth0.
# The first use of add_network adds a network called test, but that's not PXE so we don't want to use it.
# So the first call of this fixture needs to remove the test network, recreate it as a PXE network, and
# associate the network with the host's interface.
result = host.run(f"stack remove network test")
assert result.rc == 0
add_network(name = "test", address = "192.168.0.0", pxe = True)
result = host.run(f"stack set host interface network backend-0-0 network=test interface=eth0 ip=192.168.0.3")
assert result.rc == 0
# Add a frontend interface on the network.
result = host.run(f"stack add host interface a:frontend interface=eth2 network=test ip=192.168.0.2")
assert result.rc == 0
return _inner
# MASKED: stack_load function (lines 251-282)
@pytest.fixture
def fake_local_firmware_file(tmp_path_factory):
"""Creates a fake local firmware file and returns a pathlib.Path object that points to it."""
# Add a fake piece of firmware.
fake_firmware_file = tmp_path_factory.mktemp("fake_firmware") / "foo.img"
fake_firmware_file.write_text("foofakefirmware")
return fake_firmware_file
|
@pytest.fixture(
params = (
("", "exec=True"),
("", "| bash -x"),
("document=", "exec=True"),
("document=", "| bash -x"),
),
ids = ("stack_load_exec", "stack_load_bash", "stack_load_document_exec", "stack_load_document_bash"),
)
def stack_load(request, host):
"""This fixture is used to run `stack load` on the host during integration tests.
There are 4 essentially equivalent ways of loading and running a dump.json. Using
    this test fixture ensures that all 4 are tested, i.e.:
stack load dump_file exec=True
stack load document=dump_file exec=True
stack load dump_file | bash -x
stack load document=dump_file | bash -x
"""
param_string, exec_string = request.param
def _load(dump_file, **kwargs):
if "exec" in kwargs:
raise ValueError("Cannot pass exec param to this fixture. It handles it for you.")
if "document" in kwargs:
raise ValueError("Cannot pass document param to this fixture. It handles it for you.")
kwargs_string = " ".join(f"{key}={value}" for key, value in kwargs.items())
return host.run(f"stack load {param_string}{dump_file} {exec_string} {kwargs_string}")
return _load
| 251 | 282 |
import json
import subprocess
import ipaddress
import pytest
@pytest.fixture
def add_host():
def _inner(hostname, rack, rank, appliance):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
# First use of the fixture adds backend-0-0
_inner('backend-0-0', '0', '0', 'backend')
# Then return the inner function, so we can call it inside the test
# to get more hosts added
return _inner
@pytest.fixture
def add_host_with_interface():
def _inner(hostname, rack, rank, appliance, interface):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack add host interface {hostname} interface={interface}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy interface')
_inner('backend-0-0', '0', '0', 'backend', 'eth0')
return _inner
@pytest.fixture
def add_ib_switch():
def _inner(hostname, rack, rank, appliance, make, model, sw_type):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
cmd = f'stack set host attr {hostname} attr=switch_type value={sw_type}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set switch type')
_inner('switch-0-0', '0', '0', 'switch', 'Mellanox', 'm7800', 'infiniband')
return _inner
@pytest.fixture
def add_ib_switch_partition():
def _inner(switch_name, partition_name, options):
cmd = f'stack add switch partition {switch_name} name={partition_name} '
if options is not None:
cmd += f'options={options}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy switch partition')
_inner('switch-0-0', 'Default', '')
return _inner
@pytest.fixture
def add_switch():
def _inner(hostname, rack, rank, appliance, make, model):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
_inner('switch-0-0', '0', '0', 'switch', 'fake', 'unrl')
return _inner
@pytest.fixture
def add_appliance(host):
def _inner(name):
result = host.run(f'stack add appliance {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy appliance "{name}"')
# First use of the fixture adds appliance "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more appliances added
return _inner
@pytest.fixture
def add_box(host):
def _inner(name):
result = host.run(f'stack add box {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy box "{name}"')
# First use of the fixture adds box "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more boxes added
return _inner
@pytest.fixture
def add_cart(host):
def _inner(name):
result = host.run(f'stack add cart {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy cart "{name}"')
# First use of the fixture adds cart "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more carts added
return _inner
@pytest.fixture
def add_environment(host):
def _inner(name):
result = host.run(f'stack add environment {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy environment "{name}"')
# First use of the fixture adds environment "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more environments added
return _inner
@pytest.fixture
def add_group(host):
def _inner(name):
result = host.run(f'stack add group {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy group "{name}"')
# First use of the fixture adds group "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more groups added
return _inner
@pytest.fixture
def add_network(host):
"""Adds a network to the stacki db. For historical reasons the first test network this creates is pxe=False."""
def _inner(name, address, pxe = False):
result = host.run(
f'stack add network {name} address={address} mask=255.255.255.0 pxe={pxe}'
)
if result.rc != 0:
pytest.fail(f'unable to add dummy network "{name}"')
# First use of the fixture adds network "test"
_inner('test', '192.168.0.0')
# Then return the inner function, so we can call it inside the test
# to get more networks added
return _inner
@pytest.fixture
def add_host_with_net(host, add_host_with_interface, add_network):
"""Adds a host with a network. The first network this adds defaults to pxe=True."""
def _inner(hostname, rack, rank, appliance, interface, ip, network, address, pxe):
# Add the host with an interface.
add_host_with_interface(hostname = hostname, rack = rack, rank = rank, appliance = appliance, interface = interface)
# Add the network.
add_network(name = network, address = address, pxe = pxe)
# Associate it to the interface.
result = host.run(f"stack set host interface network {hostname} network={network} interface={interface}")
assert result.rc == 0
# Set the interface IP
result = host.run(f"stack set host interface ip {hostname} ip={ip} network={network}")
assert result.rc == 0
# Add it to the frontend, because a lot of things in stacki expect backends to share networks with
# frontends.
result = host.run("stack list host interface a:frontend output-format=json")
assert result.rc == 0
# Try to figure out if the frontend has an interface on this network already.
interface_on_network = False
for frontend_interface in json.loads(result.stdout):
if frontend_interface["network"] == network:
interface_on_network = True
break
if interface_on_network:
return
# Need to add an interface to the frontend on this network. Make sure we choose the next latest
# interface name so we don't clash with other interface names.
latest_interface = max(frontend_interface["interface"] for frontend_interface in json.loads(result.stdout))
# This should be a string, so we tokenize it into characters
new_interface = list(latest_interface)
new_interface[-1] = str(int(new_interface[-1]) + 1)
new_interface = "".join(new_interface)
result = host.run(f"stack add host interface a:frontend interface={new_interface} network={network} ip={ipaddress.ip_address(ip) + 1}")
assert result.rc == 0
# First use of the add_host_with_interface fixture adds backend-0-0 with interface eth0.
# The first use of add_network adds a network called test, but that's not PXE so we don't want to use it.
# So the first call of this fixture needs to remove the test network, recreate it as a PXE network, and
# associate the network with the host's interface.
result = host.run(f"stack remove network test")
assert result.rc == 0
add_network(name = "test", address = "192.168.0.0", pxe = True)
result = host.run(f"stack set host interface network backend-0-0 network=test interface=eth0 ip=192.168.0.3")
assert result.rc == 0
# Add a frontend interface on the network.
result = host.run(f"stack add host interface a:frontend interface=eth2 network=test ip=192.168.0.2")
assert result.rc == 0
return _inner
@pytest.fixture(
params = (
("", "exec=True"),
("", "| bash -x"),
("document=", "exec=True"),
("document=", "| bash -x"),
),
ids = ("stack_load_exec", "stack_load_bash", "stack_load_document_exec", "stack_load_document_bash"),
)
def stack_load(request, host):
"""This fixture is used to run `stack load` on the host during integration tests.
There are 4 essentially equivalent ways of loading and running a dump.json. Using
    this test fixture ensures that all 4 are tested, i.e.:
stack load dump_file exec=True
stack load document=dump_file exec=True
stack load dump_file | bash -x
stack load document=dump_file | bash -x
"""
param_string, exec_string = request.param
def _load(dump_file, **kwargs):
if "exec" in kwargs:
raise ValueError("Cannot pass exec param to this fixture. It handles it for you.")
if "document" in kwargs:
raise ValueError("Cannot pass document param to this fixture. It handles it for you.")
kwargs_string = " ".join(f"{key}={value}" for key, value in kwargs.items())
return host.run(f"stack load {param_string}{dump_file} {exec_string} {kwargs_string}")
return _load
@pytest.fixture
def fake_local_firmware_file(tmp_path_factory):
"""Creates a fake local firmware file and returns a pathlib.Path object that points to it."""
# Add a fake piece of firmware.
fake_firmware_file = tmp_path_factory.mktemp("fake_firmware") / "foo.img"
fake_firmware_file.write_text("foofakefirmware")
return fake_firmware_file
|
_is_token
|
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
|
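As a rough illustration (written as if run from inside this module, so no import path has to be assumed, and no trained model is needed), the sketch below passes a hand-made list of word pieces through _is_token; the example pieces are made up.
# Illustrative only: hand-crafted pieces standing in for real sentencepiece output.
pieces = ["▁یہ", "▁ایک", "اچھا", "جملہ"]
decoded = _is_token(pieces)
# A piece already carrying "▁" passes through unchanged; a bare piece is
# prefixed with "▁" when it is a stop word or longer than three characters.
print(decoded)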
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
# MASKED: _is_token function (lines 10-30)
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
    Loads the pre-trained SentencePiece model file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
"""
    Check if the model file exists.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
|
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
| 10 | 30 |
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
    Loads the pre-trained SentencePiece model file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
"""
    Check if the model file exists.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
|
_load_model
|
Loads the pre-trained SentencePiece model file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
|
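For context, a minimal usage sketch follows; the model path is a placeholder for wherever the downloaded urduhack tokenizer model lives, and EncodeAsPieces is the standard SentencePiece call for splitting text into pieces.
# Placeholder path; point this at the downloaded spm model file.
model_path = "models/word_tokenizer.model"
spm_model = _load_model(model_path)
# The returned SentencePieceProcessor can now segment raw Urdu text.
pieces = spm_model.EncodeAsPieces("یہ ایک اچھا جملہ ہے")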
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
# MASKED: _load_model function (lines 33-44)
def _is_model_available(model_path: str) -> None:
"""
    Check if the model file exists.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
|
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
    Loads the pre-trained SentencePiece model file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
| 33 | 44 |
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
    Loads the pre-trained SentencePiece model file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
"""
    Check if the model file exists.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
|
startup
|
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
|
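For reference, a minimal sketch of launching this app directly is shown below; the formal name and app id are placeholder values (a Briefcase-generated project would normally provide them through its own entry point).
if __name__ == '__main__':
    # Placeholder formal name and app id, for running the module standalone.
    HelloWorld('Hello World', 'org.example.helloworld').main_loop()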
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
# MASKED: startup function (lines 11-42)
def say_hello(self, widget):
if self.name_input.value:
name = self.name_input.value
else:
name = 'stranger'
self.main_window.info_dialog(
'Hi there!',
f"Hello, {name}"
)
def main():
return HelloWorld()
|
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
main_box = toga.Box(style=Pack(direction=COLUMN))
name_label = toga.Label(
'Your name: ',
            style=Pack(padding=(0, 5))
)
self.name_input = toga.TextInput(style=Pack(flex=1))
name_box = toga.Box(style=Pack(direction=ROW, padding=5))
name_box.add(name_label)
name_box.add(self.name_input)
button = toga.Button(
'Say Hello!',
on_press=self.say_hello,
style=Pack(padding=5)
)
main_box.add(name_box)
main_box.add(button)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
| 11 | 42 |
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
main_box = toga.Box(style=Pack(direction=COLUMN))
name_label = toga.Label(
'Your name: ',
            style=Pack(padding=(0, 5))
)
self.name_input = toga.TextInput(style=Pack(flex=1))
name_box = toga.Box(style=Pack(direction=ROW, padding=5))
name_box.add(name_label)
name_box.add(self.name_input)
button = toga.Button(
'Say Hello!',
on_press=self.say_hello,
style=Pack(padding=5)
)
main_box.add(name_box)
main_box.add(button)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def say_hello(self, widget):
if self.name_input.value:
name = self.name_input.value
else:
name = 'stranger'
self.main_window.info_dialog(
'Hi there!',
f"Hello, {name}"
)
def main():
return HelloWorld()
|
get
|
Get an existing ApiOperation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
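A minimal sketch of looking up an existing operation by its Azure resource ID follows; the subscription, resource group, service, API, and operation names are placeholders used only for illustration.
import pulumi
existing_op = ApiOperation.get(
    'existing-operation',
    id='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
       '/providers/Microsoft.ApiManagement/service/my-apim/apis/my-api/operations/my-op',
)
# Exported outputs resolve once the lookup completes.
pulumi.export('operationMethod', existing_op.method)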
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ApiOperation']
class ApiOperation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
method: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input[pulumi.InputType['RequestContractArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
responses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResponseContractArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
template_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParameterContractArgs']]]]] = None,
url_template: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Api Operation details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param pulumi.Input[str] description: Description of the operation. May include HTML formatting tags.
:param pulumi.Input[str] display_name: Operation Name.
:param pulumi.Input[str] method: A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST but not limited by only them.
:param pulumi.Input[str] operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] policies: Operation Policies
:param pulumi.Input[pulumi.InputType['RequestContractArgs']] request: An entity containing request details.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResponseContractArgs']]]] responses: Array of Operation responses.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParameterContractArgs']]]] template_parameters: Collection of URL template parameters.
:param pulumi.Input[str] url_template: Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__['api_id'] = api_id
__props__['description'] = description
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__['display_name'] = display_name
if method is None and not opts.urn:
raise TypeError("Missing required property 'method'")
__props__['method'] = method
__props__['operation_id'] = operation_id
__props__['policies'] = policies
__props__['request'] = request
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['responses'] = responses
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['template_parameters'] = template_parameters
if url_template is None and not opts.urn:
raise TypeError("Missing required property 'url_template'")
__props__['url_template'] = url_template
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiOperation")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiOperation, __self__).__init__(
'azure-nextgen:apimanagement/v20200601preview:ApiOperation',
resource_name,
__props__,
opts)
# MASKED: get function (lines 104-120)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the operation. May include HTML formatting tags.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
Operation Name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def method(self) -> pulumi.Output[str]:
"""
A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST but not limited by only them.
"""
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> pulumi.Output[Optional[str]]:
"""
Operation Policies
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter
def request(self) -> pulumi.Output[Optional['outputs.RequestContractResponse']]:
"""
An entity containing request details.
"""
return pulumi.get(self, "request")
@property
@pulumi.getter
def responses(self) -> pulumi.Output[Optional[Sequence['outputs.ResponseContractResponse']]]:
"""
Array of Operation responses.
"""
return pulumi.get(self, "responses")
@property
@pulumi.getter(name="templateParameters")
def template_parameters(self) -> pulumi.Output[Optional[Sequence['outputs.ParameterContractResponse']]]:
"""
Collection of URL template parameters.
"""
return pulumi.get(self, "template_parameters")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlTemplate")
def url_template(self) -> pulumi.Output[str]:
"""
Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
return pulumi.get(self, "url_template")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiOperation':
"""
Get an existing ApiOperation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ApiOperation(resource_name, opts=opts, __props__=__props__)
| 104 | 120 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ApiOperation']
class ApiOperation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
method: Optional[pulumi.Input[str]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input[pulumi.InputType['RequestContractArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
responses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResponseContractArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
template_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParameterContractArgs']]]]] = None,
url_template: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Api Operation details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param pulumi.Input[str] description: Description of the operation. May include HTML formatting tags.
:param pulumi.Input[str] display_name: Operation Name.
:param pulumi.Input[str] method: A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST but not limited by only them.
:param pulumi.Input[str] operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] policies: Operation Policies
:param pulumi.Input[pulumi.InputType['RequestContractArgs']] request: An entity containing request details.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResponseContractArgs']]]] responses: Array of Operation responses.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParameterContractArgs']]]] template_parameters: Collection of URL template parameters.
:param pulumi.Input[str] url_template: Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__['api_id'] = api_id
__props__['description'] = description
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__['display_name'] = display_name
if method is None and not opts.urn:
raise TypeError("Missing required property 'method'")
__props__['method'] = method
__props__['operation_id'] = operation_id
__props__['policies'] = policies
__props__['request'] = request
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['responses'] = responses
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['template_parameters'] = template_parameters
if url_template is None and not opts.urn:
raise TypeError("Missing required property 'url_template'")
__props__['url_template'] = url_template
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiOperation"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiOperation")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiOperation, __self__).__init__(
'azure-nextgen:apimanagement/v20200601preview:ApiOperation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiOperation':
"""
Get an existing ApiOperation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ApiOperation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the operation. May include HTML formatting tags.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
Operation Name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def method(self) -> pulumi.Output[str]:
"""
A Valid HTTP Operation Method. Typical Http Methods like GET, PUT, POST but not limited by only them.
"""
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> pulumi.Output[Optional[str]]:
"""
Operation Policies
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter
def request(self) -> pulumi.Output[Optional['outputs.RequestContractResponse']]:
"""
An entity containing request details.
"""
return pulumi.get(self, "request")
@property
@pulumi.getter
def responses(self) -> pulumi.Output[Optional[Sequence['outputs.ResponseContractResponse']]]:
"""
Array of Operation responses.
"""
return pulumi.get(self, "responses")
@property
@pulumi.getter(name="templateParameters")
def template_parameters(self) -> pulumi.Output[Optional[Sequence['outputs.ParameterContractResponse']]]:
"""
Collection of URL template parameters.
"""
return pulumi.get(self, "template_parameters")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlTemplate")
def url_template(self) -> pulumi.Output[str]:
"""
Relative URL template identifying the target resource for this operation. May include parameters. Example: /customers/{cid}/orders/{oid}/?date={date}
"""
return pulumi.get(self, "url_template")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
shortDescription
|
Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
|
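To make the behaviour concrete, the hypothetical test case below shows how a docstring written across two source lines is reported; the class and method names are invented.
class ExampleTests(TestCase):
    def test_collapsing(self):
        """Testing that a description written across
        two lines is reported on a single line."""
# When unittest asks for the description, shortDescription() returns:
# "Testing that a description written across two lines is reported on a single line."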
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
# MASKED: shortDescription function (lines 81-98)
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
The assertion failure, if the exception and message isn't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
| 81 | 98 |
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
The assertion failure, if the exception and message isn't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
precreate_tempfiles
|
Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
|
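The hypothetical test below sketches the intended usage; MyCommandTests and run_my_command are invented names, and it assumes the code under test calls make_tempfile() exactly once.
import kgb
class MyCommandTests(kgb.SpyAgency, TestCase):  # TestCase is the base class defined below
    def test_patch_written_to_tempfile(self):
        # Pre-create one temp file; make_tempfile() is spied to return it.
        tmpfiles = self.precreate_tempfiles(1)
        run_my_command()  # hypothetical code path that calls make_tempfile()
        with open(tmpfiles[0], 'r') as fp:
            self.assertEqual(fp.read(), 'expected contents')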
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
# MASKED: precreate_tempfiles function (lines 143-181)
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
                The assertion failure, if the exception and message aren't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
| 143 | 181 |
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
                The assertion failure, if the exception and message aren't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
assertDiffEqual
|
Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
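A minimal usage sketch; the diff bytes and class name are made up, and the TestCase import path is an assumption.

from rbtools.testing import TestCase  # assumed import path for the base class


class DiffAssertionTests(TestCase):
    def test_diff_matches(self):
        diff = b'-old line\n+new line\n'

        # Both arguments must be byte strings; comparison is line by line.
        self.assertDiffEqual(diff, b'-old line\n+new line\n')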
|
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
# MASKED: assertDiffEqual function (lines 183-200)
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
                The assertion failure, if the exception and message aren't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
| 183 | 200 |
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
                The assertion failure, if the exception and message aren't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
reviewboardrc
|
Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
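A minimal usage sketch; the configuration key and class name are illustrative, and the TestCase import path is an assumption.

from rbtools.testing import TestCase  # assumed import path for the base class


class ConfigBasedTests(TestCase):
    def test_with_config(self):
        with self.reviewboardrc({'REVIEWBOARD_URL': 'https://reviews.example.com/'},
                                use_temp_dir=True):
            # Code run here sees the generated .reviewboardrc in the current
            # (temporary) directory; the directory is removed on exit.
            pass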
|
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
                The assertion failure, if the exception and message aren't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
# MASKED: reviewboardrc function (lines 220-263)
|
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
| 220 | 263 |
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
The assertion failure, if the exception and message isn't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
|
create
|
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
            operation_policy_name (string): The name of the operation policy
                to use for the new symmetric key. Optional, defaults to None.
            name (string): The name to give the key. Optional, defaults to None.
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to apply to the symmetric key. Optional, defaults
                to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
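A minimal usage sketch; the key name is made up, the kmip.pie.client import path and the connection settings in the 'client' config section are assumptions about the deployment, and open()/close() are the methods shown in the code below.

from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

client = ProxyKmipClient(config='client')
client.open()
try:
    # Create a 256-bit AES key and get back its unique identifier.
    uid = client.create(
        enums.CryptographicAlgorithm.AES,
        256,
        name='example-symmetric-key',
        cryptographic_usage_mask=[
            enums.CryptographicUsageMask.ENCRYPT,
            enums.CryptographicUsageMask.DECRYPT,
        ],
    )
finally:
    client.close()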
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
# MASKED: create function (lines 149-211)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the public key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
            Key                          | Value
            ----------------------------|---------------------------------------
            'cryptographic_parameters'  | A dictionary containing additional
                                        | cryptographic settings. See the
                                        | decrypt method for more information.
            'initialization_vector'     | Bytes to be used to initialize the key
                                        | derivation function, if needed.
            'derivation_data'           | Bytes to be used as the basis for the
                                        | key derivation process (e.g., the
                                        | bytes to be encrypted, hashed, etc.).
            'salt'                      | Bytes to be used as a salt value for
                                        | the key derivation function, if
                                        | needed. Usually used with PBKDF2.
            'iteration_count'           | An integer defining how many
                                        | iterations should be used with the key
                                        | derivation function, if needed.
                                        | Usually used with PBKDF2.
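            For illustration, one possible derivation_parameters value using
            only the keys above (the byte strings and count are placeholders):
                derivation_parameters = {
                    'derivation_data': b'data to derive from',
                    'salt': b'salt',
                    'iteration_count': 4096
                }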
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
            Key                             | Value
            --------------------------------|---------------------------------
            'wrapping_method'               | A WrappingMethod enumeration
                                            | that specifies how the object
                                            | should be wrapped.
            'encryption_key_information'    | A dictionary containing the ID
                                            | of the wrapping key and
                                            | associated cryptographic
                                            | parameters.
            'mac_signature_key_information' | A dictionary containing the ID
                                            | of the wrapping key and
                                            | associated cryptographic
                                            | parameters.
            'attribute_names'               | A list of strings representing
                                            | the names of attributes that
                                            | should be included with the
                                            | wrapped object.
            'encoding_option'               | An EncodingOption enumeration
                                            | that specifies the encoding of
                                            | the object before it is wrapped.
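            For illustration, one possible key_wrapping_specification (the
            nested key names are assumptions in this sketch):
                key_wrapping_specification = {
                    'wrapping_method': enums.WrappingMethod.ENCRYPT,
                    'encryption_key_information': {
                        'unique_identifier': '1'
                    },
                    'encoding_option': enums.EncodingOption.NO_ENCODING
                }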
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations indicating how the symmetric key may be used.
                Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
| 149 | 211 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
                self.logger.exception("could not open client connection")
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
                self.logger.exception("could not close client connection")
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations indicating how the symmetric key may be used.
                Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
                instantiable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
        Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
locate
|
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
        attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
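        Example:
            A minimal sketch of building a client from the default
            configuration section and using it as a context manager, so the
            connection is opened and closed automatically; the algorithm
            and length shown are illustrative:
                with ProxyKmipClient(config='client') as client:
                    key_id = client.create(
                        enums.CryptographicAlgorithm.AES,
                        256
                    )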
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to apply to the symmetric key. Optional,
                defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
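        Example:
            A minimal sketch of creating a 256-bit AES key; the name is an
            illustrative placeholder:
                key_id = client.create(
                    enums.CryptographicAlgorithm.AES,
                    256,
                    name='Example AES Key'
                )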
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
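        Example:
            A minimal sketch of creating a 2048-bit RSA key pair; the key
            names are illustrative placeholders:
                public_id, private_id = client.create_key_pair(
                    enums.CryptographicAlgorithm.RSA,
                    2048,
                    public_name='Example Public Key',
                    private_name='Example Private Key'
                )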
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
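        Example:
            A minimal sketch of registering a symmetric key; the key bytes
            are a dummy value and the Pie SymmetricKey constructor is
            assumed to take an algorithm, a length in bits, and the key
            bytes:
                key = pobjects.SymmetricKey(
                    enums.CryptographicAlgorithm.AES,
                    128,
                    b'\x00' * 16
                )
                key_id = client.register(key)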
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
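        Example:
            A minimal sketch of a PBKDF2-style derivation; the source key
            ID, salt, and iteration count are illustrative placeholders:
                derived_id = client.derive_key(
                    enums.ObjectType.SYMMETRIC_KEY,
                    ['existing-key-id'],
                    enums.DerivationMethod.PBKDF2,
                    {
                        'cryptographic_parameters': {
                            'hashing_algorithm':
                                enums.HashingAlgorithm.SHA_256
                        },
                        'salt': b'example-salt',
                        'iteration_count': 10000
                    },
                    cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
                    cryptographic_length=256
                )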
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
# MASKED: locate function (lines 503-559)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
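        Example:
            A minimal sketch of retrieving a key wrapped by another key;
            the object IDs and enumeration choices are illustrative
            assumptions:
                wrapped_key = client.get(
                    'stored-key-id',
                    key_wrapping_specification={
                        'wrapping_method': enums.WrappingMethod.ENCRYPT,
                        'encryption_key_information': {
                            'unique_identifier': 'wrapping-key-id',
                            'cryptographic_parameters': {
                                'block_cipher_mode':
                                    enums.BlockCipherMode.NIST_KEY_WRAP
                            }
                        },
                        'encoding_option': enums.EncodingOption.NO_ENCODING
                    }
                )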
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
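        Example:
            A minimal sketch of revoking a compromised key; the ID,
            message, and timestamp are illustrative placeholders:
                client.revoke(
                    enums.RevocationReasonCode.KEY_COMPROMISE,
                    uid='compromised-key-id',
                    revocation_message='Example compromise report.',
                    compromise_occurrence_date=1467302400
                )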
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
            'counter_length'              | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
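        Example:
            A minimal sketch of AES-CBC encryption; the key ID and the
            all-zero IV are illustrative placeholders only:
                ciphertext, iv = client.encrypt(
                    b'example plaintext',
                    uid='encryption-key-id',
                    cryptographic_parameters={
                        'cryptographic_algorithm':
                            enums.CryptographicAlgorithm.AES,
                        'block_cipher_mode': enums.BlockCipherMode.CBC,
                        'padding_method': enums.PaddingMethod.PKCS5
                    },
                    iv_counter_nonce=b'\x00' * 16
                )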
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
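        Example:
            A minimal sketch reversing the encrypt example above; the key
            ID, parameters, and IV must match those used for encryption:
                plaintext = client.decrypt(
                    ciphertext,
                    uid='encryption-key-id',
                    cryptographic_parameters={
                        'cryptographic_algorithm':
                            enums.CryptographicAlgorithm.AES,
                        'block_cipher_mode': enums.BlockCipherMode.CBC,
                        'padding_method': enums.PaddingMethod.PKCS5
                    },
                    iv_counter_nonce=b'\x00' * 16
                )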
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
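        Example:
            A minimal sketch of verifying an RSA signature; the key ID and
            parameter choices are illustrative assumptions:
                validity = client.signature_verify(
                    b'signed message',
                    signature,
                    uid='public-key-id',
                    cryptographic_parameters={
                        'cryptographic_algorithm':
                            enums.CryptographicAlgorithm.RSA,
                        'hashing_algorithm':
                            enums.HashingAlgorithm.SHA_256,
                        'padding_method': enums.PaddingMethod.PKCS1v15
                    }
                )
                is_valid = (validity == enums.ValidityIndicator.VALID)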
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
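        Example:
            A minimal sketch of signing data with an RSA private key; the
            key ID and parameter choices are illustrative assumptions:
                signature = client.sign(
                    b'data to sign',
                    uid='private-key-id',
                    cryptographic_parameters={
                        'cryptographic_algorithm':
                            enums.CryptographicAlgorithm.RSA,
                        'hashing_algorithm':
                            enums.HashingAlgorithm.SHA_256,
                        'padding_method': enums.PaddingMethod.PKCS1v15
                    }
                )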
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
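        Example:
            A minimal sketch of computing an HMAC; the key ID and the
            algorithm choice are illustrative assumptions:
                key_id, mac_data = client.mac(
                    b'data to authenticate',
                    uid='hmac-key-id',
                    algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
                )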
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
        Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
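        Example:
            A minimal sketch that retrieves at most ten object identifiers;
            richer filters require Attribute structs built with the
            client's attribute factory:
                uids = client.locate(maximum_items=10)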
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
| 503 | 559 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to apply to the symmetric key. Optional,
                defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
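# Example (illustrative only, not executed): creating a 256-bit AES key with
# this method. A minimal sketch assuming an already-open client; the key name
# and the enums.CryptographicAlgorithm.AES member are illustrative choices
# not shown elsewhere in this file.
#
#     uid = client.create(
#         enums.CryptographicAlgorithm.AES,
#         256,
#         name='example-aes-key',
#         cryptographic_usage_mask=[
#             enums.CryptographicUsageMask.ENCRYPT,
#             enums.CryptographicUsageMask.DECRYPT
#         ]
#     )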
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
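# Example (illustrative only, not executed): creating a 2048-bit RSA key pair.
# A minimal sketch assuming an open client; the names and the SIGN/VERIFY
# usage masks are hypothetical choices.
#
#     public_uid, private_uid = client.create_key_pair(
#         enums.CryptographicAlgorithm.RSA,
#         2048,
#         public_name='example-rsa-public',
#         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
#         private_name='example-rsa-private',
#         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
#     )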
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
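# Example (illustrative only, not executed): registering key material that was
# generated outside the appliance. A minimal sketch assuming the Pie
# SymmetricKey constructor takes (algorithm, length, value); the 16-byte value
# shown is placeholder key material, not a real key.
#
#     key = pobjects.SymmetricKey(
#         enums.CryptographicAlgorithm.AES,
#         128,
#         b'\x00\x01\x02\x03\x04\x05\x06\x07'
#         b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
#     )
#     uid = client.register(key)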
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
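# Example (illustrative only, not executed): deriving a 128-bit AES key from
# an existing base key. A minimal sketch; '<base-key-uid>' is a placeholder,
# and enums.DerivationMethod.HMAC / enums.HashingAlgorithm.SHA_256 are
# assumptions about what the appliance supports.
#
#     derived_uid = client.derive_key(
#         enums.ObjectType.SYMMETRIC_KEY,
#         ['<base-key-uid>'],
#         enums.DerivationMethod.HMAC,
#         {
#             'cryptographic_parameters': {
#                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
#             },
#             'derivation_data': b'example derivation data'
#         },
#         cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
#         cryptographic_length=128
#     )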
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
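# Example (illustrative only, not executed): locating objects by name using
# the client's attribute factory to build the search attribute. A minimal
# sketch; 'example-aes-key' is a hypothetical name.
#
#     name_attribute = client.attribute_factory.create_attribute(
#         enums.AttributeType.NAME,
#         'example-aes-key'
#     )
#     uids = client.locate(attributes=[name_attribute])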
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
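# Example (illustrative only, not executed): retrieving a key wrapped under
# another key. A minimal sketch; the UIDs are placeholders and the
# WrappingMethod/BlockCipherMode/EncodingOption members used are assumptions
# about what the appliance supports.
#
#     wrapped_key = client.get(
#         '<key-uid>',
#         key_wrapping_specification={
#             'wrapping_method': enums.WrappingMethod.ENCRYPT,
#             'encryption_key_information': {
#                 'unique_identifier': '<wrapping-key-uid>',
#                 'cryptographic_parameters': {
#                     'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
#                 }
#             },
#             'encoding_option': enums.EncodingOption.NO_ENCODING
#         }
#     )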
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
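# Example (illustrative only, not executed): inspecting an object's metadata
# with get_attribute_list and get_attributes. A minimal sketch; '<key-uid>' is
# a placeholder and the attribute names shown are standard KMIP names that the
# appliance may or may not support.
#
#     names = client.get_attribute_list('<key-uid>')
#     uid, attrs = client.get_attributes(
#         '<key-uid>',
#         ['Cryptographic Algorithm', 'Cryptographic Length']
#     )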
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): The number of seconds since the
epoch at which the managed object was first believed to be
compromised, converted to a KMIP DateTime before being sent.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
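# Example (illustrative only, not executed): a typical end-of-life sequence
# combining activate, revoke, and destroy. A minimal sketch; '<key-uid>' is a
# placeholder and enums.RevocationReasonCode.CESSATION_OF_OPERATION is an
# assumed enumeration member.
#
#     client.activate('<key-uid>')
#     # ... use the key for some time ...
#     client.revoke(
#         enums.RevocationReasonCode.CESSATION_OF_OPERATION,
#         '<key-uid>',
#         revocation_message='example: key retired'
#     )
#     client.destroy('<key-uid>')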
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
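# Example (illustrative only, not executed): AES-CBC encryption with PKCS5
# padding. A minimal sketch; the key UID and the all-zero IV are placeholders,
# and the enum members used are assumptions about appliance support.
#
#     ciphertext, iv = client.encrypt(
#         b'example plaintext',
#         uid='<aes-key-uid>',
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#             'block_cipher_mode': enums.BlockCipherMode.CBC,
#             'padding_method': enums.PaddingMethod.PKCS5
#         },
#         iv_counter_nonce=b'\x00' * 16
#     )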
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
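# Example (illustrative only, not executed): decrypting the ciphertext from
# the encrypt sketch above, using the same key, parameters, and IV. A minimal
# sketch with the same placeholders.
#
#     plaintext = client.decrypt(
#         ciphertext,
#         uid='<aes-key-uid>',
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#             'block_cipher_mode': enums.BlockCipherMode.CBC,
#             'padding_method': enums.PaddingMethod.PKCS5
#         },
#         iv_counter_nonce=b'\x00' * 16
#     )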
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
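# Example (illustrative only, not executed): signing a message with a private
# key and checking it with signature_verify (defined above) and the matching
# public key. A minimal sketch; the UIDs are placeholders and the RSA/SHA-256
# parameter choices are assumptions.
#
#     params = {
#         'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
#         'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
#         'padding_method': enums.PaddingMethod.PKCS1v15
#     }
#     signature = client.sign(
#         b'example message', uid='<private-key-uid>',
#         cryptographic_parameters=params)
#     validity = client.signature_verify(
#         b'example message', signature, uid='<public-key-uid>',
#         cryptographic_parameters=params)
#     assert validity == enums.ValidityIndicator.VALID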
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
used for the MAC operation.
bytes: The MAC computed over the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
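# Example (illustrative only, not executed): computing an HMAC over some data
# with a managed key. A minimal sketch; the key UID is a placeholder and
# enums.CryptographicAlgorithm.HMAC_SHA256 is an assumed enumeration member.
#
#     key_uid, mac_data = client.mac(
#         b'example data',
#         uid='<hmac-key-uid>',
#         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
#     )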
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
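# Example (illustrative only, not executed): typical use of the client as a
# context manager, which opens the connection on entry and closes it on exit.
# A minimal sketch assuming the 'client' configuration section points at a
# reachable KMIP appliance with valid certificates.
#
#     with ProxyKmipClient(config='client') as client:
#         uid = client.create(enums.CryptographicAlgorithm.AES, 256)
#         key = client.get(uid)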
|
get_attributes
|
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the socket connection and for
reading data from and writing data to the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
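# Example (illustrative only, not executed): managing the connection
# explicitly with open() and close(). A minimal sketch assuming a reachable
# appliance configured in the 'client' section; '<object-uid>' is a
# placeholder.
#
#     client = ProxyKmipClient(config='client')
#     client.open()
#     try:
#         names = client.get_attribute_list('<object-uid>')
#     finally:
#         client.close()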
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to apply to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
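Example:
A minimal usage sketch of retrieving a key wrapped by another
key; both IDs ('1' and '2') and the parameter choices are
illustrative assumptions:
with ProxyKmipClient() as client:
key = client.get(
'1',
key_wrapping_specification={
'wrapping_method': enums.WrappingMethod.ENCRYPT,
'encryption_key_information': {
'unique_identifier': '2',
'cryptographic_parameters': {
'block_cipher_mode':
enums.BlockCipherMode.NIST_KEY_WRAP
}
},
'encoding_option':
enums.EncodingOption.NO_ENCODING
}
)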
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
# MASKED: get_attributes function (lines 631-673)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
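Example:
A minimal usage sketch of AES/CBC encryption; the key ID
('1'), plaintext, and IV below are illustrative assumptions:
with ProxyKmipClient() as client:
ciphertext, iv = client.encrypt(
b'example plaintext',
uid='1',
cryptographic_parameters={
'cryptographic_algorithm':
enums.CryptographicAlgorithm.AES,
'block_cipher_mode': enums.BlockCipherMode.CBC,
'padding_method': enums.PaddingMethod.PKCS5
},
iv_counter_nonce=b'0123456789abcdef'
)
# iv is None here because the IV was supplied by the
# caller rather than autogenerated by the server.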
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
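Example:
A minimal usage sketch of signing data and then checking the
signature; the key IDs ('1' for the private key, '2' for the
public key) and the parameter choices are illustrative
assumptions:
with ProxyKmipClient() as client:
params = {
'padding_method': enums.PaddingMethod.PSS,
'cryptographic_algorithm':
enums.CryptographicAlgorithm.RSA,
'hashing_algorithm': enums.HashingAlgorithm.SHA_256
}
signature = client.sign(
b'example message',
uid='1',
cryptographic_parameters=params
)
validity = client.signature_verify(
b'example message',
signature,
uid='2',
cryptographic_parameters=params
)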
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
used for the MAC operation.
bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
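Example:
A minimal usage sketch of computing an HMAC-SHA256 MAC; the
key ID ('1') is an illustrative assumption:
with ProxyKmipClient() as client:
key_uid, mac_data = client.mac(
b'example data',
uid='1',
algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
)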
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
"""
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
"""
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
"""
Build a name attribute, returned in a list for ease
of use in the caller
"""
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
| 631 | 673 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to apply to the symmetric key. Optional,
defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
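Example:
A minimal usage sketch of creating a 256-bit AES key; the
name is an illustrative assumption:
with ProxyKmipClient() as client:
uid = client.create(
enums.CryptographicAlgorithm.AES,
256,
name='Example Symmetric Key'
)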
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
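Example:
A minimal usage sketch of creating a 2048-bit RSA key pair
for signing; the key names are illustrative assumptions:
with ProxyKmipClient() as client:
public_uid, private_uid = client.create_key_pair(
enums.CryptographicAlgorithm.RSA,
2048,
public_name='Example Public Key',
public_usage_mask=[
enums.CryptographicUsageMask.VERIFY
],
private_name='Example Private Key',
private_usage_mask=[
enums.CryptographicUsageMask.SIGN
]
)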
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
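Example:
A minimal usage sketch of registering an existing 128-bit AES
key; the key bytes and usage masks are illustrative
assumptions:
key = pobjects.SymmetricKey(
enums.CryptographicAlgorithm.AES,
128,
b'0123456789abcdef',
masks=[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
with ProxyKmipClient() as client:
uid = client.register(key)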
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt'                     | Bytes used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
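Example:
A minimal usage sketch of deriving a 256-bit AES key from an
existing secret with PBKDF2; the source ID ('1'), salt, and
iteration count are illustrative assumptions:
with ProxyKmipClient() as client:
uid = client.derive_key(
enums.ObjectType.SYMMETRIC_KEY,
['1'],
enums.DerivationMethod.PBKDF2,
{
'cryptographic_parameters': {
'hashing_algorithm':
enums.HashingAlgorithm.SHA_256
},
'salt': b'example salt',
'iteration_count': 4096
},
cryptographic_length=256,
cryptographic_algorithm=(
enums.CryptographicAlgorithm.AES
)
)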
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
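Example:
A minimal usage sketch of revoking a compromised key; the uid
('1') and message are illustrative assumptions:
import time
with ProxyKmipClient() as client:
client.revoke(
enums.RevocationReasonCode.KEY_COMPROMISE,
uid='1',
revocation_message='Example compromise',
compromise_occurrence_date=int(time.time())
)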
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
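    # Illustrative sketch (not part of the library): verifying a signature,
    # assuming an open client and a hypothetical RSA public key ID
    # 'rsa-public-uid'. The result is a ValidityIndicator enumeration.
    #
    #     validity = client.signature_verify(
    #         message,
    #         signature,
    #         uid='rsa-public-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #             'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #         }
    #     )
    #     is_valid = (validity == enums.ValidityIndicator.VALID)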
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
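    # Illustrative sketch (not part of the library): signing data with a
    # hypothetical RSA private key ID 'rsa-private-uid', assuming an open
    # client.
    #
    #     signature = client.sign(
    #         b'data to sign',
    #         uid='rsa-private-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #             'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #         }
    #     )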
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
            string: The unique ID of the key used for the MAC operation.
            bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
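    # Illustrative sketch (not part of the library): computing an HMAC over
    # data with a hypothetical key ID 'hmac-key-uid', assuming an open client
    # and a server that supports HMAC_SHA256.
    #
    #     key_uid, mac_data = client.mac(
    #         b'data to authenticate',
    #         uid='hmac-key-uid',
    #         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    #     )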
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
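    # Illustrative sketch (not part of the library): the kind of dictionary
    # this helper expects; keys that are not set simply default to None in
    # the resulting CryptographicParameters struct.
    #
    #     params = self._build_cryptographic_parameters({
    #         'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #         'block_cipher_mode': enums.BlockCipherMode.GCM,
    #         'tag_length': 16,
    #         'random_iv': True
    #     })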
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
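# Illustrative sketch (not part of the library): typical usage of the client
# as a context manager, assuming a reachable KMIP server configured under the
# default 'client' section.
#
#     with ProxyKmipClient(config='client') as client:
#         key_id = client.create(enums.CryptographicAlgorithm.AES, 256)
#         client.activate(key_id)
#         key = client.get(key_id)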
|
get_attribute_list
|
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
                to use for the new symmetric key. Optional, defaults to None.
            name (string): The name to give the key. Optional, defaults to
                None.
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations indicating how the symmetric key should be used.
                Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
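    # Illustrative sketch (not part of the library): creating a 256-bit AES
    # key with an extra usage mask, assuming an open client. The key name
    # 'example-aes-key' is hypothetical.
    #
    #     key_id = client.create(
    #         enums.CryptographicAlgorithm.AES,
    #         256,
    #         name='example-aes-key',
    #         cryptographic_usage_mask=[
    #             enums.CryptographicUsageMask.WRAP_KEY
    #         ]
    #     )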
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
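    # Illustrative sketch (not part of the library): creating a 2048-bit RSA
    # key pair with per-key names and usage masks, assuming an open client.
    # The key names are hypothetical.
    #
    #     public_id, private_id = client.create_key_pair(
    #         enums.CryptographicAlgorithm.RSA,
    #         2048,
    #         public_name='example-public-key',
    #         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
    #         private_name='example-private-key',
    #         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    #     )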
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
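    # Illustrative sketch (not part of the library): registering a locally
    # generated AES key, assuming an open client, 16 bytes of key material in
    # key_bytes, and that the Pie SymmetricKey constructor accepts a masks
    # list (an assumption here).
    #
    #     key = pobjects.SymmetricKey(
    #         enums.CryptographicAlgorithm.AES,
    #         128,
    #         key_bytes,
    #         masks=[enums.CryptographicUsageMask.ENCRYPT,
    #                enums.CryptographicUsageMask.DECRYPT]
    #     )
    #     key_id = client.register(key)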
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
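    # Illustrative sketch (not part of the library): deriving a 128-bit
    # symmetric key from a hypothetical base secret 'base-secret-uid' via
    # PBKDF2, assuming an open client and server-side PBKDF2 support.
    #
    #     derived_id = client.derive_key(
    #         enums.ObjectType.SYMMETRIC_KEY,
    #         ['base-secret-uid'],
    #         enums.DerivationMethod.PBKDF2,
    #         {
    #             'cryptographic_parameters': {
    #                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #             },
    #             'salt': b'salt',
    #             'iteration_count': 4096
    #         },
    #         cryptographic_length=128,
    #         cryptographic_algorithm=enums.CryptographicAlgorithm.AES
    #     )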
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
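    # Illustrative sketch (not part of the library): locating objects by
    # name using the client's attribute factory, assuming an open client and
    # a hypothetical name 'example-aes-key'.
    #
    #     name_attribute = client.attribute_factory.create_attribute(
    #         enums.AttributeType.NAME,
    #         'example-aes-key'
    #     )
    #     uids = client.locate(attributes=[name_attribute])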
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
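    # Illustrative sketch (not part of the library): retrieving a key wrapped
    # under a second, hypothetical wrapping key 'wrapping-key-uid', assuming
    # an open client and a server that supports key wrapping on Get.
    #
    #     wrapped_key = client.get(
    #         'target-key-uid',
    #         key_wrapping_specification={
    #             'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #             'encryption_key_information': {
    #                 'unique_identifier': 'wrapping-key-uid',
    #                 'cryptographic_parameters': {
    #                     'block_cipher_mode':
    #                         enums.BlockCipherMode.NIST_KEY_WRAP
    #                 }
    #             },
    #             'encoding_option': enums.EncodingOption.NO_ENCODING
    #         }
    #     )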
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
# MASKED: get_attribute_list function (lines 675-703)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
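    # Illustrative sketch (not part of the library): a simple lifecycle for a
    # hypothetical key ID 'example-key-uid', assuming an open client and a
    # server policy that allows destroying revoked keys.
    #
    #     client.activate('example-key-uid')
    #     # ... use the key ...
    #     client.revoke(
    #         enums.RevocationReasonCode.KEY_COMPROMISE,
    #         uid='example-key-uid',
    #         revocation_message='example compromise'
    #     )
    #     client.destroy('example-key-uid')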
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
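    # Illustrative sketch (not part of the library): AES-GCM encryption with
    # a server-generated IV, assuming an open client and a hypothetical key
    # ID 'aes-key-uid'.
    #
    #     cipher_text, nonce = client.encrypt(
    #         b'secret data',
    #         uid='aes-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #             'block_cipher_mode': enums.BlockCipherMode.GCM,
    #             'random_iv': True,
    #             'iv_length': 96,
    #             'tag_length': 16
    #         }
    #     )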
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
            string: The unique ID of the key used for the MAC operation.
            bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
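    # Illustrative sketch (not part of the library): listing attribute names
    # for a hypothetical managed object ID 'example-key-uid', assuming an
    # open client.
    #
    #     names = client.get_attribute_list('example-key-uid')
    #     if 'Cryptographic Algorithm' in names:
    #         pass  # the object carries an algorithm attribute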
| 675 | 703 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection: %s", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection: %s", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None.
name (string): The name to give the key. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to set on the symmetric key. Optional, defaults to
None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
not all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask):
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
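# Illustrative usage sketch (not part of the original module): creating a
# 256-bit AES key with create(). The configuration section and key name
# below are hypothetical placeholders.
#
#     client = ProxyKmipClient(config='client')
#     client.open()
#     try:
#         key_id = client.create(
#             enums.CryptographicAlgorithm.AES,
#             256,
#             name='example-aes-key'
#         )
#     finally:
#         client.close()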
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
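# Illustrative usage sketch (not part of the original module): creating a
# 2048-bit RSA key pair with usage masks split between signing and
# verification, assuming `client` is an open ProxyKmipClient. The names
# shown are hypothetical placeholders.
#
#     public_id, private_id = client.create_key_pair(
#         enums.CryptographicAlgorithm.RSA,
#         2048,
#         public_name='example-rsa-public',
#         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
#         private_name='example-rsa-private',
#         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
#     )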
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
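# Illustrative usage sketch (not part of the original module): registering a
# pre-existing 128-bit AES key, assuming the Pie SymmetricKey constructor
# accepts (algorithm, length, value) and `client` is an open ProxyKmipClient.
# The key bytes are placeholders.
#
#     key = pobjects.SymmetricKey(
#         enums.CryptographicAlgorithm.AES,
#         128,
#         b'\x00\x01\x02\x03\x04\x05\x06\x07'
#         b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
#     )
#     key_id = client.register(key)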
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
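# Illustrative usage sketch (not part of the original module): deriving a
# 128-bit AES key from an existing secret with PBKDF2, assuming `client` is
# an open ProxyKmipClient. The unique identifier, salt, and iteration count
# are hypothetical placeholders.
#
#     derived_id = client.derive_key(
#         enums.ObjectType.SYMMETRIC_KEY,
#         ['existing-secret-id'],
#         enums.DerivationMethod.PBKDF2,
#         {
#             'cryptographic_parameters': {
#                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
#             },
#             'salt': b'salt',
#             'iteration_count': 10000
#         },
#         cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
#         cryptographic_length=128
#     )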
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
not all(isinstance(item, cobjects.Attribute)
for item in attributes):
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
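# Illustrative usage sketch (not part of the original module): locating
# objects by name using an attribute built with the client's own attribute
# factory, assuming `client` is an open ProxyKmipClient. The name value is a
# hypothetical placeholder.
#
#     name_attribute = client.attribute_factory.create_attribute(
#         enums.AttributeType.NAME,
#         'example-aes-key'
#     )
#     uids = client.locate(maximum_items=10, attributes=[name_attribute])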
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
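# Illustrative usage sketch (not part of the original module): retrieving a
# key wrapped under another key, assuming `client` is an open ProxyKmipClient.
# The identifiers are hypothetical placeholders and the enumerations are
# assumed to exist in kmip.core.enums.
#
#     wrapped_key = client.get(
#         'target-key-id',
#         key_wrapping_specification={
#             'wrapping_method': enums.WrappingMethod.ENCRYPT,
#             'encryption_key_information': {
#                 'unique_identifier': 'wrapping-key-id',
#                 'cryptographic_parameters': {
#                     'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
#                 }
#             },
#             'encoding_option': enums.EncodingOption.NO_ENCODING
#         }
#     )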
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
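# Illustrative usage sketch (not part of the original module): listing the
# attribute names of an object and then fetching a subset of them, assuming
# `client` is an open ProxyKmipClient. The identifier is a hypothetical
# placeholder.
#
#     names = client.get_attribute_list('managed-object-id')
#     uid, attrs = client.get_attributes(
#         'managed-object-id',
#         ['Cryptographic Algorithm', 'Cryptographic Length']
#     )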
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): The number of seconds since the
epoch at which the managed object was first believed to be
compromised; converted internally to a DateTime primitive.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# Revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
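# Illustrative usage sketch (not part of the original module): a typical
# end-of-life sequence for a key, assuming `client` is an open
# ProxyKmipClient. The identifier and timestamp are hypothetical placeholders.
#
#     client.activate('managed-object-id')
#     client.revoke(
#         enums.RevocationReasonCode.KEY_COMPROMISE,
#         'managed-object-id',
#         revocation_message='example compromise',
#         compromise_occurrence_date=1500000000
#     )
#     client.destroy('managed-object-id')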
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
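# Illustrative usage sketch (not part of the original module): an AES-CBC
# round trip through encrypt() and decrypt(), assuming `client` is an open
# ProxyKmipClient. The key ID is a hypothetical placeholder and the server
# is asked to autogenerate the IV.
#
#     parameters = {
#         'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#         'block_cipher_mode': enums.BlockCipherMode.CBC,
#         'padding_method': enums.PaddingMethod.PKCS5
#     }
#     ciphertext, iv = client.encrypt(
#         b'example plaintext',
#         uid='encryption-key-id',
#         cryptographic_parameters=dict(parameters, random_iv=True)
#     )
#     plaintext = client.decrypt(
#         ciphertext,
#         uid='encryption-key-id',
#         cryptographic_parameters=parameters,
#         iv_counter_nonce=iv
#     )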
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
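# Illustrative usage sketch (not part of the original module): signing data
# with an RSA private key and verifying it with the matching public key,
# assuming `client` is an open ProxyKmipClient. The key IDs are hypothetical
# placeholders.
#
#     parameters = {
#         'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
#         'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
#         'padding_method': enums.PaddingMethod.PSS
#     }
#     signature = client.sign(
#         b'example message',
#         uid='private-key-id',
#         cryptographic_parameters=parameters
#     )
#     validity = client.signature_verify(
#         b'example message',
#         signature,
#         uid='public-key-id',
#         cryptographic_parameters=parameters
#     )
#     assert validity == enums.ValidityIndicator.VALID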
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
used for the MAC operation.
bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
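# Illustrative usage sketch (not part of the original module): computing an
# HMAC-SHA256 over a payload with an existing MAC key, assuming `client` is
# an open ProxyKmipClient. The key ID is a hypothetical placeholder.
#
#     key_id, mac_bytes = client.mac(
#         b'example payload',
#         uid='mac-key-id',
#         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
#     )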
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
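# Illustrative usage sketch (not part of the original module): __enter__ and
# __exit__ let the client manage its connection as a context manager, so
# open()/close() do not need to be called explicitly.
#
#     with ProxyKmipClient(config='client') as client:
#         key_id = client.create(enums.CryptographicAlgorithm.AES, 256)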
|
activate
|
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection: %s", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection: %s", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None.
name (string): The name to give the key. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to set on the symmetric key. Optional, defaults to
None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
not all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask):
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
not all(isinstance(item, cobjects.Attribute)
for item in attributes):
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
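    # Illustrative sketch, not part of the original client: locating objects
    # by a name attribute built with the same factory the client uses
    # internally. The 'client' object is an assumed open ProxyKmipClient.
    #
    #   name_attribute = client.attribute_factory.create_attribute(
    #       enums.AttributeType.NAME,
    #       'example-key-name'
    #   )
    #   uids = client.locate(maximum_items=10, attributes=[name_attribute])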
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
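    # Illustrative sketch, not part of the original client: retrieving a key
    # wrapped under another key already stored on the server. The 'client',
    # 'key_uid', and 'wrapping_key_uid' names are assumed placeholders.
    #
    #   wrapped_key = client.get(
    #       key_uid,
    #       key_wrapping_specification={
    #           'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #           'encryption_key_information': {
    #               'unique_identifier': wrapping_key_uid,
    #               'cryptographic_parameters': {
    #                   'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
    #               }
    #           },
    #           'encoding_option': enums.EncodingOption.NO_ENCODING
    #       }
    #   )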
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
# MASKED: activate function (lines 705-736)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
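    # Illustrative sketch, not part of the original client: an encrypt/decrypt
    # round trip with a stored AES key using CBC mode and PKCS#5 padding. The
    # 'client' and 'key_uid' names are assumed placeholders.
    #
    #   parameters = {
    #       'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #       'block_cipher_mode': enums.BlockCipherMode.CBC,
    #       'padding_method': enums.PaddingMethod.PKCS5
    #   }
    #   ciphertext, iv = client.encrypt(
    #       b'sixteen byte msg',
    #       uid=key_uid,
    #       cryptographic_parameters=dict(parameters, random_iv=True)
    #   )
    #   plaintext = client.decrypt(
    #       ciphertext,
    #       uid=key_uid,
    #       cryptographic_parameters=parameters,
    #       iv_counter_nonce=iv
    #   )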
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
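    # Illustrative sketch, not part of the original client: signing data with
    # a stored RSA private key and verifying it with the matching public key.
    # The 'client', 'private_uid', and 'public_uid' names are placeholders.
    #
    #   parameters = {
    #       'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #       'padding_method': enums.PaddingMethod.PKCS1v15,
    #       'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #   }
    #   signature = client.sign(
    #       b'message bytes', uid=private_uid,
    #       cryptographic_parameters=parameters
    #   )
    #   validity = client.signature_verify(
    #       b'message bytes', signature, uid=public_uid,
    #       cryptographic_parameters=parameters
    #   )
    #   assert validity == enums.ValidityIndicator.VALID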
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
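    # Illustrative sketch, not part of the original client: computing an
    # HMAC-SHA256 over raw bytes with a stored key. The 'client' and
    # 'key_uid' names are assumed placeholders.
    #
    #   key_uid, mac_bytes = client.mac(
    #       b'data to authenticate',
    #       uid=key_uid,
    #       algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    #   )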
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
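    # Illustrative sketch, not part of the original client: the context
    # manager protocol above opens and closes the connection automatically.
    # The hostname, port, and certificate paths are assumed placeholders.
    #
    #   with ProxyKmipClient(hostname='127.0.0.1', port=5696,
    #                        cert='client.crt', key='client.key',
    #                        ca='ca.crt') as client:
    #       uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    #       client.activate(uid)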
|
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
| 705 | 736 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
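    # Illustrative sketch, not part of the original client: opening and
    # closing the connection explicitly when the context manager protocol is
    # not used. The configuration section name is an assumed placeholder.
    #
    #   client = ProxyKmipClient(config='client')
    #   client.open()
    #   try:
    #       uids = client.locate(maximum_items=5)
    #   finally:
    #       client.close()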
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of enumerations of crypto
usage mask passing to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
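    # Illustrative sketch, not part of the original client: creating a named
    # 256-bit AES key that may also be used for key wrapping. The 'client'
    # name is an assumed open ProxyKmipClient.
    #
    #   key_uid = client.create(
    #       enums.CryptographicAlgorithm.AES,
    #       256,
    #       name='example-aes-key',
    #       cryptographic_usage_mask=[
    #           enums.CryptographicUsageMask.WRAP_KEY,
    #           enums.CryptographicUsageMask.UNWRAP_KEY
    #       ]
    #   )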
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
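    # Illustrative sketch, not part of the original client: creating a
    # 2048-bit RSA key pair with distinct usage masks for each half. The
    # 'client' name is an assumed open ProxyKmipClient.
    #
    #   public_uid, private_uid = client.create_key_pair(
    #       enums.CryptographicAlgorithm.RSA,
    #       2048,
    #       public_name='example-rsa-public',
    #       public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
    #       private_name='example-rsa-private',
    #       private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    #   )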
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
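    # Illustrative sketch, not part of the original client: registering a
    # locally generated 128-bit AES key as a Pie SymmetricKey object. The key
    # bytes shown are placeholder values.
    #
    #   key = pobjects.SymmetricKey(
    #       enums.CryptographicAlgorithm.AES,
    #       128,
    #       (b'\x00\x01\x02\x03\x04\x05\x06\x07'
    #        b'\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F')
    #   )
    #   key_uid = client.register(key)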
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
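    # Usage sketch for decrypt(): reversing the encrypt example above with
    # the same hypothetical key UID and parameters, passing the IV back in
    # explicitly.
    #
    #     plaintext = client.decrypt(
    #         ciphertext,
    #         uid='example-aes-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #             'block_cipher_mode': enums.BlockCipherMode.CBC,
    #             'padding_method': enums.PaddingMethod.PKCS5
    #         },
    #         iv_counter_nonce=iv
    #     )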
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
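    # Usage sketch for signature_verify(): checking an RSA/SHA-256 signature
    # with a hypothetical public key UID and inspecting the returned
    # ValidityIndicator enumeration.
    #
    #     validity = client.signature_verify(
    #         message,
    #         signature,
    #         uid='example-public-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #             'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
    #             'padding_method': enums.PaddingMethod.PKCS1v15
    #         }
    #     )
    #     if validity == enums.ValidityIndicator.VALID:
    #         pass  # signature checks out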
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
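    # Usage sketch for sign(): producing an RSA/SHA-256 signature with a
    # hypothetical private key UID.
    #
    #     signature = client.sign(
    #         b'data to sign',
    #         uid='example-private-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #             'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
    #             'padding_method': enums.PaddingMethod.PKCS1v15
    #         }
    #     )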
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
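    # Usage sketch for mac(): computing an HMAC-SHA256 over some bytes with
    # a hypothetical key UID.
    #
    #     key_uid, mac_data = client.mac(
    #         b'data to authenticate',
    #         uid='example-hmac-key-uid',
    #         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    #     )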
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
revoke
|
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
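    # Usage sketch for constructing the client: hypothetical certificate
    # paths, with the context manager protocol handling open()/close().
    #
    #     client = ProxyKmipClient(
    #         hostname='127.0.0.1',
    #         port=5696,
    #         cert='/path/to/client.cert.pem',
    #         key='/path/to/client.key.pem',
    #         ca='/path/to/ca.cert.pem'
    #     )
    #     with client:
    #         uid = client.create(enums.CryptographicAlgorithm.AES, 256)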
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
                self.logger.exception("could not open client connection")
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
                self.logger.exception("could not close client connection")
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to apply to the symmetric key. Optional, defaults
                to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
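    # Usage sketch for create(): generating a named 256-bit AES key on the
    # server. Assumes an open client instance `client`.
    #
    #     uid = client.create(
    #         enums.CryptographicAlgorithm.AES,
    #         256,
    #         name='example-symmetric-key'
    #     )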
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
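    # Usage sketch for create_key_pair(): a 2048-bit RSA key pair with
    # per-key names and usage masks.
    #
    #     public_uid, private_uid = client.create_key_pair(
    #         enums.CryptographicAlgorithm.RSA,
    #         2048,
    #         public_name='example-public-key',
    #         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
    #         private_name='example-private-key',
    #         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    #     )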
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
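    # Usage sketch for register(): uploading an existing 128-bit AES key,
    # assuming the Pie SymmetricKey constructor takes (algorithm, length,
    # value).
    #
    #     key = pobjects.SymmetricKey(
    #         enums.CryptographicAlgorithm.AES,
    #         128,
    #         b'\x00\x01\x02\x03\x04\x05\x06\x07'
    #         b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
    #     )
    #     uid = client.register(key)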
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
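    # Usage sketch for derive_key(): deriving a 256-bit AES key from a
    # hypothetical password secret via PBKDF2, using the derivation
    # parameter keys documented in the docstring table above.
    #
    #     derived_uid = client.derive_key(
    #         enums.ObjectType.SYMMETRIC_KEY,
    #         ['example-password-secret-uid'],
    #         enums.DerivationMethod.PBKDF2,
    #         {
    #             'cryptographic_parameters': {
    #                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #             },
    #             'salt': b'example-salt',
    #             'iteration_count': 10000
    #         },
    #         cryptographic_length=256,
    #         cryptographic_algorithm=enums.CryptographicAlgorithm.AES
    #     )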
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
                raise TypeError(
                    "object_group_member must be an ObjectGroupMember "
                    "enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
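    # Usage sketch for locate(): searching by name using an attribute built
    # with the client's attribute factory.
    #
    #     name_attribute = client.attribute_factory.create_attribute(
    #         enums.AttributeType.NAME,
    #         'example-symmetric-key'
    #     )
    #     uids = client.locate(attributes=[name_attribute])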
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
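    # Usage sketch for get(): retrieving a key wrapped by a hypothetical
    # wrapping key, using the key_wrapping_specification keys documented in
    # the docstring table above.
    #
    #     key = client.get(
    #         'example-key-uid',
    #         key_wrapping_specification={
    #             'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #             'encryption_key_information': {
    #                 'unique_identifier': 'example-wrapping-key-uid',
    #                 'cryptographic_parameters': {
    #                     'block_cipher_mode':
    #                         enums.BlockCipherMode.NIST_KEY_WRAP
    #                 }
    #             },
    #             'encoding_option': enums.EncodingOption.NO_ENCODING
    #         }
    #     )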
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
# MASKED: revoke function (lines 738-792)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
        # Revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
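    # Usage sketch for revoke(): marking a hypothetical key as compromised,
    # with the compromise date given as seconds since the epoch.
    #
    #     client.revoke(
    #         enums.RevocationReasonCode.KEY_COMPROMISE,
    #         uid='example-key-uid',
    #         revocation_message='Key disclosed to a third party.',
    #         compromise_occurrence_date=1512400000
    #     )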
| 738 | 792 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
            operation_policy_name (string): The name of the operation policy
                to use for the new symmetric key. Optional, defaults to None.
            name (string): The name to give the key. Optional, defaults to
                None.
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to assign to the symmetric key. Optional,
                defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
            if not isinstance(cryptographic_usage_mask, list) or \
                    not all(isinstance(item, enums.CryptographicUsageMask)
                            for item in cryptographic_usage_mask):
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in
                a candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
            if not isinstance(attributes, list) or \
                    not all(isinstance(item, cobjects.Attribute)
                            for item in attributes):
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
        # Revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
            string: The unique ID of the key used for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
destroy
|
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
            operation_policy_name (string): The name of the operation policy
                to use for the new symmetric key. Optional, defaults to None.
            name (string): The name to give the key. Optional, defaults to
                None.
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to assign to the symmetric key. Optional,
                defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
            if not isinstance(cryptographic_usage_mask, list) or \
                    not all(isinstance(item, enums.CryptographicUsageMask)
                            for item in cryptographic_usage_mask):
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
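    # Example (illustrative sketch, not part of the original source): deriving
    # a new AES key from an existing key via HMAC. The client instance and the
    # 'existing-key-uid' identifier are hypothetical.
    #
    #     new_uid = client.derive_key(
    #         enums.ObjectType.SYMMETRIC_KEY,
    #         ['existing-key-uid'],
    #         enums.DerivationMethod.HMAC,
    #         {
    #             'cryptographic_parameters': {
    #                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #             },
    #             'derivation_data': b'\x01\x02\x03\x04',
    #             'salt': b'example-salt'
    #         },
    #         cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
    #         cryptographic_length=128
    #     )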
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
                raise TypeError(
                    "object_group_member must be an ObjectGroupMember "
                    "enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
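    # Example (illustrative sketch, not part of the original source): locating
    # objects by name. The client instance and the name value are hypothetical.
    #
    #     name_attribute = client.attribute_factory.create_attribute(
    #         enums.AttributeType.NAME,
    #         'example-key-name'
    #     )
    #     uids = client.locate(maximum_items=10, attributes=[name_attribute])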
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
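    # Example (illustrative sketch, not part of the original source): fetching
    # a key wrapped under another key. All unique identifiers are hypothetical.
    #
    #     wrapped_key = client.get(
    #         'secret-key-uid',
    #         key_wrapping_specification={
    #             'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #             'encryption_key_information': {
    #                 'unique_identifier': 'wrapping-key-uid',
    #                 'cryptographic_parameters': {
    #                     'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
    #                 }
    #             },
    #             'encoding_option': enums.EncodingOption.NO_ENCODING
    #         }
    #     )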
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
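    # Example (illustrative sketch, not part of the original source): revoking
    # a compromised key. The UID and timestamp are hypothetical.
    #
    #     client.revoke(
    #         enums.RevocationReasonCode.KEY_COMPROMISE,
    #         uid='compromised-key-uid',
    #         revocation_message='Key material was disclosed.',
    #         compromise_occurrence_date=1483228800
    #     )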
# MASKED: destroy function (lines 794-824)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
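    # Example (illustrative sketch, not part of the original source): AES-CBC
    # encryption with a server-generated IV. The key UID is hypothetical.
    #
    #     ciphertext, iv = client.encrypt(
    #         b'example plaintext',
    #         uid='aes-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #             'block_cipher_mode': enums.BlockCipherMode.CBC,
    #             'padding_method': enums.PaddingMethod.PKCS5,
    #             'random_iv': True
    #         }
    #     )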
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
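    # Example (illustrative sketch, not part of the original source):
    # decrypting the ciphertext produced by the encrypt example above, reusing
    # the returned IV. The key UID is hypothetical.
    #
    #     plaintext = client.decrypt(
    #         ciphertext,
    #         uid='aes-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #             'block_cipher_mode': enums.BlockCipherMode.CBC,
    #             'padding_method': enums.PaddingMethod.PKCS5
    #         },
    #         iv_counter_nonce=iv
    #     )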
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the provided signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
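    # Example (illustrative sketch, not part of the original source): verifying
    # an RSA-PSS signature. The key UID and message bytes are hypothetical.
    #
    #     validity = client.signature_verify(
    #         b'signed message bytes',
    #         signature,
    #         uid='rsa-public-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #             'padding_method': enums.PaddingMethod.PSS,
    #             'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #         }
    #     )
    #     is_valid = (validity == enums.ValidityIndicator.VALID)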
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
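    # Example (illustrative sketch, not part of the original source): creating
    # an RSA-PSS signature. The key UID is hypothetical.
    #
    #     signature = client.sign(
    #         b'data to sign',
    #         uid='rsa-private-key-uid',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    #             'padding_method': enums.PaddingMethod.PSS,
    #             'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #         }
    #     )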
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
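    # Example (illustrative sketch, not part of the original source): computing
    # an HMAC-SHA256 MAC. The key UID is hypothetical.
    #
    #     key_uid, mac_data = client.mac(
    #         b'data to authenticate',
    #         uid='hmac-key-uid',
    #         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    #     )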
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
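    # Example (illustrative sketch, not part of the original source): typical
    # usage through the context manager protocol defined above. The hostname,
    # port, and certificate paths are hypothetical.
    #
    #     with ProxyKmipClient(hostname='127.0.0.1', port=5696,
    #                          cert='/path/to/client.crt',
    #                          key='/path/to/client.key',
    #                          ca='/path/to/ca.crt') as client:
    #         uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    #         client.activate(uid)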
|
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
| 794 | 824 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
                self.logger.exception("could not open client connection")
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
                self.logger.exception("could not close client connection")
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations to apply to the symmetric key. Optional, defaults
                to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
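    # Example (illustrative sketch, not part of the original source): creating
    # a named 256-bit AES key with an explicit usage mask. All values are
    # hypothetical.
    #
    #     key_uid = client.create(
    #         enums.CryptographicAlgorithm.AES,
    #         256,
    #         name='example-symmetric-key',
    #         cryptographic_usage_mask=[
    #             enums.CryptographicUsageMask.ENCRYPT,
    #             enums.CryptographicUsageMask.DECRYPT
    #         ]
    #     )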
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
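    # Example (illustrative sketch, not part of the original source): creating
    # a 2048-bit RSA key pair with separate names and usage masks. All values
    # are hypothetical.
    #
    #     public_uid, private_uid = client.create_key_pair(
    #         enums.CryptographicAlgorithm.RSA,
    #         2048,
    #         public_name='example-public-key',
    #         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
    #         private_name='example-private-key',
    #         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    #     )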
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
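    # Example (illustrative sketch, not part of the original source):
    # registering a locally constructed Pie SymmetricKey. The key bytes are
    # hypothetical.
    #
    #     key = pobjects.SymmetricKey(
    #         enums.CryptographicAlgorithm.AES,
    #         128,
    #         b'\x00' * 16
    #     )
    #     key_uid = client.register(key)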
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
                raise TypeError(
                    "object_group_member must be an ObjectGroupMember "
                    "enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
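    # Example (illustrative sketch, not part of the original source):
    # destroying a retired key by its (hypothetical) UID.
    #
    #     client.destroy('retired-key-uid')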
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
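# --- Illustrative usage sketch (not part of the original module) ---
# The inverse of the encrypt sketch above. The key UID, ciphertext, and IV
# placeholders below are assumptions standing in for values obtained from a
# previous encrypt() call.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

key_id = '1'          # assumed UID of the symmetric key used for encryption
ciphertext = b'...'   # assumed output of a previous encrypt() call
iv = b'...'           # assumed IV returned by the server

with ProxyKmipClient(config='client') as client:
    plaintext = client.decrypt(
        ciphertext,
        uid=key_id,
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
            'block_cipher_mode': enums.BlockCipherMode.CBC,
            'padding_method': enums.PaddingMethod.PKCS5
        },
        iv_counter_nonce=iv
    )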
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the provided signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
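# --- Illustrative usage sketch (not part of the original module) ---
# Verifies an RSA signature. The key UID, message, signature bytes, and the
# PKCS#1 v1.5 / SHA-256 parameter choices are assumptions for the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

public_id = '2'       # assumed UID of the verification key
message = b'signed payload'
signature = b'...'    # assumed output of a previous sign() call

with ProxyKmipClient(config='client') as client:
    validity = client.signature_verify(
        message,
        signature,
        uid=public_id,
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
            'padding_method': enums.PaddingMethod.PKCS1v15,
            'hashing_algorithm': enums.HashingAlgorithm.SHA_256
        }
    )
    if validity == enums.ValidityIndicator.VALID:
        print('signature is valid')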
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
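# --- Illustrative usage sketch (not part of the original module) ---
# Produces a digital signature with an RSA private key. The key UID and the
# parameter choices are assumptions for the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

private_id = '3'      # assumed UID of an existing RSA private key

with ProxyKmipClient(config='client') as client:
    signature = client.sign(
        b'signed payload',
        uid=private_id,
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
            'padding_method': enums.PaddingMethod.PKCS1v15,
            'hashing_algorithm': enums.HashingAlgorithm.SHA_256
        }
    )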
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
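# --- Illustrative usage sketch (not part of the original module) ---
# Computes an HMAC over a byte string. The key UID and the HMAC-SHA256
# algorithm choice are assumptions for the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

hmac_key_id = '4'     # assumed UID of an existing HMAC key

with ProxyKmipClient(config='client') as client:
    used_key_id, mac_data = client.mac(
        b'data to authenticate',
        uid=hmac_key_id,
        algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    )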
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
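# --- Illustrative sketch (not part of the original module) ---
# Shows the dictionary-to-struct mapping performed above: each recognized
# key maps one-to-one onto a CryptographicParameters field, and missing
# keys simply become None. The specific values are assumptions.
from kmip.core import enums
from kmip.core.attributes import CryptographicParameters

crypto_params_dict = {
    'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    'block_cipher_mode': enums.BlockCipherMode.CBC,
    'padding_method': enums.PaddingMethod.PKCS5,
    'random_iv': True
}
# Equivalent struct produced by _build_cryptographic_parameters().
equivalent_struct = CryptographicParameters(
    cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
    block_cipher_mode=enums.BlockCipherMode.CBC,
    padding_method=enums.PaddingMethod.PKCS5,
    random_iv=True
)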
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
encrypt
|
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
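# --- Illustrative usage sketch (not part of the original module) ---
# The client can be driven with explicit open()/close() calls or as a
# context manager (see __enter__/__exit__ below); the latter guarantees the
# connection is closed. Connection settings are assumed to come from the
# 'client' section of the PyKMIP configuration file.
from kmip.pie.client import ProxyKmipClient

client = ProxyKmipClient(config='client')
client.open()
try:
    names = client.get_attribute_list()   # operates on the ID placeholder
finally:
    client.close()

# Equivalent, with automatic open/close:
with ProxyKmipClient(config='client') as client:
    names = client.get_attribute_list()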
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to assign to the symmetric key. Optional, defaults
to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
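# --- Illustrative usage sketch (not part of the original module) ---
# Creates a 256-bit AES key; the extra WRAP_KEY usage extends the default
# ENCRYPT/DECRYPT mask. The key name and usage choice are assumptions for
# the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    key_id = client.create(
        enums.CryptographicAlgorithm.AES,
        256,
        name='example-aes-key',
        cryptographic_usage_mask=[enums.CryptographicUsageMask.WRAP_KEY]
    )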
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
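# --- Illustrative usage sketch (not part of the original module) ---
# Creates a 2048-bit RSA key pair with distinct names and usage masks for
# the public and private halves. All values are assumptions for the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    public_id, private_id = client.create_key_pair(
        enums.CryptographicAlgorithm.RSA,
        2048,
        public_name='example-rsa-public',
        public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
        private_name='example-rsa-private',
        private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    )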
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
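# --- Illustrative usage sketch (not part of the original module) ---
# Registers locally generated key material as a Pie SymmetricKey object.
# The SymmetricKey constructor arguments (algorithm, length, value) reflect
# the Pie object API as I understand it and are an assumption here, as are
# the key bytes themselves.
import os

from kmip.core import enums
from kmip.pie import objects as pobjects
from kmip.pie.client import ProxyKmipClient

key = pobjects.SymmetricKey(
    enums.CryptographicAlgorithm.AES,
    128,
    os.urandom(16)        # 128-bit key material, for illustration only
)

with ProxyKmipClient(config='client') as client:
    key_id = client.register(key)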
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
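# --- Illustrative usage sketch (not part of the original module) ---
# Derives a new 128-bit AES key from an existing secret using PBKDF2. The
# source UID, salt, and iteration count are assumptions for the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

base_secret_id = '5'   # assumed UID of an existing SecretData object

with ProxyKmipClient(config='client') as client:
    derived_id = client.derive_key(
        enums.ObjectType.SYMMETRIC_KEY,
        [base_secret_id],
        enums.DerivationMethod.PBKDF2,
        {
            'cryptographic_parameters': {
                'hashing_algorithm': enums.HashingAlgorithm.SHA_256
            },
            'salt': b'example-salt',
            'iteration_count': 4096
        },
        cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
        cryptographic_length=128
    )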
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
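# --- Illustrative usage sketch (not part of the original module) ---
# Locates 256-bit AES keys by building attribute criteria with the same
# AttributeFactory used by this module. The attribute choices are
# assumptions for the example.
from kmip.core import enums
from kmip.core.factories import attributes
from kmip.pie.client import ProxyKmipClient

factory = attributes.AttributeFactory()
criteria = [
    factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
        enums.CryptographicAlgorithm.AES
    ),
    factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
        256
    )
]

with ProxyKmipClient(config='client') as client:
    matching_ids = client.locate(attributes=criteria)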
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
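# --- Illustrative usage sketch (not part of the original module) ---
# Retrieves a managed object, optionally asking the server to wrap it with
# another key first. The UIDs and the NIST key-wrap settings are
# assumptions for the example.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

key_id = '6'            # assumed UID of the key to retrieve
wrapping_key_id = '7'   # assumed UID of the key-encryption key

with ProxyKmipClient(config='client') as client:
    # Plain retrieval.
    secret = client.get(key_id)
    # Retrieval with server-side key wrapping.
    wrapped = client.get(
        key_id,
        key_wrapping_specification={
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': {
                'unique_identifier': wrapping_key_id,
                'cryptographic_parameters': {
                    'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
                }
            },
            'encoding_option': enums.EncodingOption.NO_ENCODING
        }
    )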
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): The number of seconds since the
epoch, which will be converted to the DateTime at which the
managed object was first believed to be compromised.
Optional, defaults to None.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
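# --- Illustrative usage sketch (not part of the original module) ---
# A typical end-of-life sequence: activate a key, later revoke it as
# compromised, then destroy it. The UID, revocation message, and timestamp
# are assumptions for the example.
import time

from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

key_id = '8'            # assumed UID of an existing key

with ProxyKmipClient(config='client') as client:
    client.activate(key_id)
    client.revoke(
        enums.RevocationReasonCode.KEY_COMPROMISE,
        uid=key_id,
        revocation_message='example revocation',
        compromise_occurrence_date=int(time.time())
    )
    client.destroy(key_id)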
# MASKED: encrypt function (lines 826-931)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the provided signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
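# Usage sketch (not part of the original source): a minimal example of calling
# encrypt() with a cryptographic_parameters dictionary. The module path, the
# 'client' configuration section, and all parameter values below are assumptions
# for illustration only.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

client = ProxyKmipClient(config='client')  # assumes a [client] section in the PyKMIP config file
with client:
    key_uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    ciphertext, iv = client.encrypt(
        b'example plaintext',
        uid=key_uid,
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
            'block_cipher_mode': enums.BlockCipherMode.CBC,
            'padding_method': enums.PaddingMethod.PKCS5,
            'random_iv': True
        }
    )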
| 826 | 931 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection")
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection")
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to apply to the symmetric key. Optional, defaults
to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
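# Usage sketch (not part of the original source): creating a named 256-bit AES
# key. The key name and the 'client' configuration section are hypothetical.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    uid = client.create(
        enums.CryptographicAlgorithm.AES,
        256,
        name='example-aes-key',
        cryptographic_usage_mask=[enums.CryptographicUsageMask.ENCRYPT,
                                  enums.CryptographicUsageMask.DECRYPT]
    )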
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
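# Usage sketch (not part of the original source): creating a 2048-bit RSA key
# pair with distinct names and usage masks; all values shown are illustrative.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    public_uid, private_uid = client.create_key_pair(
        enums.CryptographicAlgorithm.RSA,
        2048,
        public_name='example-rsa-public',
        public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
        private_name='example-rsa-private',
        private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    )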
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
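# Usage sketch (not part of the original source): registering a locally built
# symmetric key from the Pie object model. The key bytes are a dummy value and
# the SymmetricKey constructor arguments follow the Pie API as documented.
from kmip.core import enums
from kmip.pie import objects as pobjects
from kmip.pie.client import ProxyKmipClient

key = pobjects.SymmetricKey(
    enums.CryptographicAlgorithm.AES,
    128,
    b'\x00' * 16
)
with ProxyKmipClient(config='client') as client:
    uid = client.register(key)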
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
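# Usage sketch (not part of the original source): deriving a new 128-bit AES
# key from an existing key via PBKDF2. The base key UID, salt, iteration count,
# and the server's support for this derivation method are all assumptions.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    base_uid = client.create(
        enums.CryptographicAlgorithm.AES,
        256,
        cryptographic_usage_mask=[enums.CryptographicUsageMask.DERIVE_KEY]
    )
    derived_uid = client.derive_key(
        enums.ObjectType.SYMMETRIC_KEY,
        [base_uid],
        enums.DerivationMethod.PBKDF2,
        {
            'cryptographic_parameters': {
                'hashing_algorithm': enums.HashingAlgorithm.SHA_256
            },
            'salt': b'example-salt',
            'iteration_count': 4096
        },
        cryptographic_length=128,
        cryptographic_algorithm=enums.CryptographicAlgorithm.AES
    )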
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
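# Usage sketch (not part of the original source): locating objects by name
# using the same attribute factory the client relies on internally. The name
# value is a hypothetical example.
from kmip.core import enums
from kmip.core.factories.attributes import AttributeFactory
from kmip.pie.client import ProxyKmipClient

name_attribute = AttributeFactory().create_attribute(
    enums.AttributeType.NAME,
    'example-aes-key'
)
with ProxyKmipClient(config='client') as client:
    uids = client.locate(attributes=[name_attribute])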
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
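# Usage sketch (not part of the original source): retrieving a key wrapped by
# another key on the server. The wrapping configuration, enumeration choices,
# and UIDs are illustrative and depend on what the server supports.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    wrapping_uid = client.create(
        enums.CryptographicAlgorithm.AES,
        256,
        cryptographic_usage_mask=[enums.CryptographicUsageMask.WRAP_KEY]
    )
    key_uid = client.create(enums.CryptographicAlgorithm.AES, 128)
    wrapped_key = client.get(
        key_uid,
        key_wrapping_specification={
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': {
                'unique_identifier': wrapping_uid,
                'cryptographic_parameters': {
                    'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
                }
            },
            'encoding_option': enums.EncodingOption.NO_ENCODING
        }
    )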
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
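# Usage sketch (not part of the original source): listing the attribute names
# of a managed object and then fetching a subset of them. The attribute names
# passed to get_attributes are standard KMIP attribute names.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    names = client.get_attribute_list(uid)
    uuid, attrs = client.get_attributes(
        uid,
        ['Cryptographic Algorithm', 'Cryptographic Length']
    )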
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
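# Usage sketch (not part of the original source): a typical key lifecycle of
# activate, revoke, and destroy. The revocation reason and message are
# illustrative values.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    client.activate(uid)
    client.revoke(
        enums.RevocationReasonCode.CESSATION_OF_OPERATION,
        uid=uid,
        revocation_message='example key rotation'
    )
    client.destroy(uid)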
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
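# Usage sketch (not part of the original source): round-tripping data through
# encrypt and decrypt with the same key and parameters. The algorithm, mode,
# and padding choices are illustrative.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

parameters = {
    'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    'block_cipher_mode': enums.BlockCipherMode.CBC,
    'padding_method': enums.PaddingMethod.PKCS5
}
with ProxyKmipClient(config='client') as client:
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    ciphertext, iv = client.encrypt(
        b'example plaintext',
        uid=uid,
        cryptographic_parameters=dict(parameters, random_iv=True)
    )
    plaintext = client.decrypt(
        ciphertext,
        uid=uid,
        cryptographic_parameters=parameters,
        iv_counter_nonce=iv
    )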
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
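# Usage sketch (not part of the original source): signing data with a private
# key and verifying the signature with the matching public key. The padding,
# algorithm, and hashing choices are illustrative assumptions.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

parameters = {
    'padding_method': enums.PaddingMethod.PKCS1v15,
    'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
    'hashing_algorithm': enums.HashingAlgorithm.SHA_256
}
with ProxyKmipClient(config='client') as client:
    public_uid, private_uid = client.create_key_pair(
        enums.CryptographicAlgorithm.RSA,
        2048,
        public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
        private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    )
    signature = client.sign(
        b'message to sign',
        uid=private_uid,
        cryptographic_parameters=parameters
    )
    validity = client.signature_verify(
        b'message to sign',
        signature,
        uid=public_uid,
        cryptographic_parameters=parameters
    )
    assert validity == enums.ValidityIndicator.VALID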
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
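# Usage sketch (not part of the original source): computing an HMAC over data
# with a managed HMAC key. The algorithm choice, key length, and data are
# illustrative; server support for HMAC key creation is assumed.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    key_uid = client.create(
        enums.CryptographicAlgorithm.HMAC_SHA256,
        256,
        cryptographic_usage_mask=[enums.CryptographicUsageMask.MAC_GENERATE]
    )
    uid, mac_data = client.mac(
        b'data to authenticate',
        uid=key_uid,
        algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    )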
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
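# Usage sketch (not part of the original source): __enter__/__exit__ make the
# client usable as a context manager, which guarantees close() runs even if an
# operation raises. The equivalent explicit open()/close() form is shown for
# comparison; configuration values are assumptions.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)

client = ProxyKmipClient(config='client')
client.open()
try:
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)
finally:
    client.close()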
|
decrypt
|
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection")
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection")
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations to apply to the symmetric key. Optional, defaults
to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
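Example:
    A minimal sketch of registering a 128-bit AES key; the key bytes
    are illustrative and ``client`` is assumed to be an already-opened
    ProxyKmipClient:

        from kmip.core import enums
        from kmip.pie import objects
        key = objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            128,
            b'\x00\x01\x02\x03\x04\x05\x06\x07' * 2
        )
        uid = client.register(key)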
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
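Example:
    A minimal sketch of deriving a key with PBKDF2; the source UID,
    salt, and iteration count are illustrative and ``client`` is
    assumed to be an already-opened ProxyKmipClient:

        from kmip.core import enums
        derived_uid = client.derive_key(
            enums.ObjectType.SYMMETRIC_KEY,
            ['1'],
            enums.DerivationMethod.PBKDF2,
            {
                'cryptographic_parameters': {
                    'hashing_algorithm': enums.HashingAlgorithm.SHA_256
                },
                'salt': b'salt',
                'iteration_count': 4096
            },
            cryptographic_length=128,
            cryptographic_algorithm=enums.CryptographicAlgorithm.AES
        )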
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
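Example:
    A minimal sketch of locating objects by name; the attribute value
    is illustrative and ``client`` is assumed to be an already-opened
    ProxyKmipClient:

        from kmip.core import enums
        from kmip.core.factories import attributes
        factory = attributes.AttributeFactory()
        name_attribute = factory.create_attribute(
            enums.AttributeType.NAME,
            'example-key-name'
        )
        uids = client.locate(attributes=[name_attribute])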
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
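Example:
    A minimal sketch of retrieving a key wrapped by another key; the
    UIDs and the specific wrapping settings are illustrative
    assumptions and ``client`` is assumed to be an already-opened
    ProxyKmipClient:

        from kmip.core import enums
        wrapped_key = client.get(
            uid='1',
            key_wrapping_specification={
                'wrapping_method': enums.WrappingMethod.ENCRYPT,
                'encryption_key_information': {
                    'unique_identifier': '2',
                    'cryptographic_parameters': {
                        'block_cipher_mode':
                            enums.BlockCipherMode.NIST_KEY_WRAP
                    }
                },
                'encoding_option': enums.EncodingOption.NO_ENCODING
            }
        )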
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
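Example:
    A minimal sketch; the UID and attribute names are illustrative and
    ``client`` is assumed to be an already-opened ProxyKmipClient:

        uid, attribute_list = client.get_attributes(
            uid='1',
            attribute_names=[
                'Cryptographic Algorithm',
                'Cryptographic Length'
            ]
        )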
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to a DateTime marking when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
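Example:
    A minimal sketch of revoking a compromised key; the UID, message,
    and timestamp are illustrative and ``client`` is assumed to be an
    already-opened ProxyKmipClient:

        from kmip.core import enums
        client.revoke(
            enums.RevocationReasonCode.KEY_COMPROMISE,
            uid='1',
            revocation_message='Key material exposed.',
            compromise_occurrence_date=1492000000
        )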
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
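Example:
    A minimal sketch of AES-CBC encryption; the key UID, plaintext,
    and IV are illustrative and ``client`` is assumed to be an
    already-opened ProxyKmipClient:

        from kmip.core import enums
        ciphertext, iv = client.encrypt(
            b'sixteen byte msg',
            uid='1',
            cryptographic_parameters={
                'cryptographic_algorithm':
                    enums.CryptographicAlgorithm.AES,
                'block_cipher_mode': enums.BlockCipherMode.CBC,
                'padding_method': enums.PaddingMethod.PKCS5
            },
            iv_counter_nonce=b'\x00' * 16
        )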
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
# MASKED: decrypt function (lines 933-1036)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
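Example:
    A minimal sketch of verifying an RSA-PSS signature; ``client`` is
    assumed to be an already-opened ProxyKmipClient and ``signature``
    stands in for bytes produced by an earlier sign call:

        from kmip.core import enums
        validity = client.signature_verify(
            b'signed message bytes',
            signature,
            uid='1',
            cryptographic_parameters={
                'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
                'padding_method': enums.PaddingMethod.PSS
            }
        )
        is_valid = (validity == enums.ValidityIndicator.VALID)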
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
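Example:
    A minimal sketch of signing data; the UID and parameter choices
    are illustrative and ``client`` is assumed to be an already-opened
    ProxyKmipClient:

        from kmip.core import enums
        signature = client.sign(
            b'data to be signed',
            uid='1',
            cryptographic_parameters={
                'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
                'padding_method': enums.PaddingMethod.PSS
            }
        )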
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
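Example:
    A minimal sketch of computing an HMAC; the key UID and algorithm
    are illustrative and ``client`` is assumed to be an already-opened
    ProxyKmipClient:

        from kmip.core import enums
        key_uid, mac_data = client.mac(
            b'data to authenticate',
            uid='1',
            algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
        )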
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
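# Usage note (illustrative sketch, not part of the class API): the
# __enter__/__exit__ hooks above let the client be used as a context
# manager so the connection is opened and closed automatically. The
# hostname and port below are placeholder values:
#
#     with ProxyKmipClient(hostname='127.0.0.1', port=5696) as client:
#         uid = client.create(enums.CryptographicAlgorithm.AES, 256)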
|
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
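Example:
    A minimal sketch of AES-CBC decryption; the key UID and IV are
    illustrative, ``ciphertext`` stands in for bytes produced by an
    earlier encrypt call, and ``client`` is assumed to be an
    already-opened ProxyKmipClient:

        from kmip.core import enums
        plaintext = client.decrypt(
            ciphertext,
            uid='1',
            cryptographic_parameters={
                'cryptographic_algorithm':
                    enums.CryptographicAlgorithm.AES,
                'block_cipher_mode': enums.BlockCipherMode.CBC,
                'padding_method': enums.PaddingMethod.PKCS5
            },
            iv_counter_nonce=b'\x00' * 16
        )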
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
| 933 | 1,036 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
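Example:
    A minimal construction sketch; the hostname, port, and certificate
    paths are placeholder values:

        client = ProxyKmipClient(
            hostname='127.0.0.1',
            port=5696,
            cert='/path/to/client/cert.pem',
            key='/path/to/client/key.pem',
            ca='/path/to/ca/cert.pem'
        )
        client.open()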
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of enumerations of crypto
usage mask passing to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
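Example:
    A minimal sketch of creating a 256-bit AES key; ``client`` is
    assumed to be an already-opened ProxyKmipClient and the name is
    illustrative:

        from kmip.core import enums
        uid = client.create(
            enums.CryptographicAlgorithm.AES,
            256,
            name='example-symmetric-key'
        )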
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to a DateTime marking when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# Revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
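# Illustrative (commented) usage sketch for signature_verify, assuming an
# open client, an existing signing key UID, and previously produced
# signature bytes; all names and parameter choices below are hypothetical:
#
#     validity = client.signature_verify(
#         b'signed message bytes',
#         signature,
#         uid=signing_key_uid,
#         cryptographic_parameters={
#             'padding_method': enums.PaddingMethod.PSS,
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA
#         }
#     )
#     is_valid = (validity == enums.ValidityIndicator.VALID)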
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MACed data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
signature_verify
|
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
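# Illustrative (commented) usage: the client can be driven explicitly with
# open()/close() or used as a context manager via __enter__/__exit__ below.
# The connection settings shown are hypothetical examples:
#
#     client = ProxyKmipClient(hostname='127.0.0.1', port=5696)
#     client.open()
#     try:
#         pass  # perform KMIP operations here
#     finally:
#         client.close()
#
#     # or, equivalently:
#     with ProxyKmipClient(config='client') as client:
#         pass  # perform KMIP operations here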
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None.
name (string): The name to give the key. Optional, defaults to
None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations specifying how the symmetric key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
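# Illustrative (commented) usage sketch for create, assuming an open client;
# the key name is a hypothetical example:
#
#     key_uid = client.create(
#         enums.CryptographicAlgorithm.AES,
#         256,
#         name='example-aes-key',
#         cryptographic_usage_mask=[
#             enums.CryptographicUsageMask.ENCRYPT,
#             enums.CryptographicUsageMask.DECRYPT
#         ]
#     )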
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
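# Illustrative (commented) usage sketch for create_key_pair, assuming an
# open client; the names and usage masks are hypothetical examples:
#
#     public_uid, private_uid = client.create_key_pair(
#         enums.CryptographicAlgorithm.RSA,
#         2048,
#         public_name='example-rsa-public',
#         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
#         private_name='example-rsa-private',
#         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
#     )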
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
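# Illustrative (commented) usage sketch for register, assuming an open
# client and a Pie SymmetricKey object; the constructor arguments shown are
# an assumption about the Pie API, with placeholder key bytes:
#
#     key = pobjects.SymmetricKey(
#         enums.CryptographicAlgorithm.AES,
#         128,
#         b'\x00' * 16
#     )
#     key_uid = client.register(key)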
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
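# Illustrative (commented) usage sketch for derive_key, assuming an open
# client and an existing base key UID; the PBKDF2 settings below are
# hypothetical examples:
#
#     derived_uid = client.derive_key(
#         enums.ObjectType.SYMMETRIC_KEY,
#         [base_key_uid],
#         enums.DerivationMethod.PBKDF2,
#         {
#             'cryptographic_parameters': {
#                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
#             },
#             'salt': b'example-salt',
#             'iteration_count': 10000
#         },
#         cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
#         cryptographic_length=256
#     )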
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
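# Illustrative (commented) usage sketch for locate, assuming an open client;
# the Name attribute value is a hypothetical example and the attribute is
# built with the same AttributeFactory this class uses:
#
#     factory = attributes.AttributeFactory()
#     name_attribute = factory.create_attribute(
#         enums.AttributeType.NAME,
#         'example-aes-key'
#     )
#     uids = client.locate(maximum_items=10, attributes=[name_attribute])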
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
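# Illustrative (commented) usage sketch for get, assuming an open client,
# an existing object UID, and a wrapping key UID; the WrappingMethod,
# BlockCipherMode, and EncodingOption members used here are assumptions
# about the enums module:
#
#     managed_object = client.get(object_uid)
#
#     wrapped_object = client.get(
#         object_uid,
#         key_wrapping_specification={
#             'wrapping_method': enums.WrappingMethod.ENCRYPT,
#             'encryption_key_information': {
#                 'unique_identifier': wrapping_key_uid,
#                 'cryptographic_parameters': {
#                     'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
#                 }
#             },
#             'encoding_option': enums.EncodingOption.NO_ENCODING
#         }
#     )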
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# Revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
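# Illustrative (commented) usage sketch for revoke, assuming an open client
# and an existing object UID; the reason, message, and timestamp values are
# hypothetical examples:
#
#     client.revoke(
#         enums.RevocationReasonCode.KEY_COMPROMISE,
#         uid=key_uid,
#         revocation_message='example compromise report',
#         compromise_occurrence_date=1500000000
#     )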
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
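# Illustrative (commented) usage sketch for encrypt, assuming an open client
# and an AES key UID; the parameter choices are hypothetical examples:
#
#     ciphertext, iv = client.encrypt(
#         b'plaintext bytes',
#         uid=key_uid,
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#             'block_cipher_mode': enums.BlockCipherMode.CBC,
#             'padding_method': enums.PaddingMethod.PKCS5,
#             'random_iv': True
#         }
#     )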
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
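# Illustrative (commented) usage sketch for decrypt, mirroring the encrypt
# example above; iv is whatever IV was supplied to (or returned by) encrypt:
#
#     plaintext = client.decrypt(
#         ciphertext,
#         uid=key_uid,
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#             'block_cipher_mode': enums.BlockCipherMode.CBC,
#             'padding_method': enums.PaddingMethod.PKCS5
#         },
#         iv_counter_nonce=iv
#     )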
# MASKED: signature_verify function (lines 1038-1102)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MACed data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
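# Illustrative (commented) usage sketch for mac, assuming an open client and
# an HMAC key UID; the algorithm choice is a hypothetical example:
#
#     key_uid, mac_bytes = client.mac(
#         b'message bytes',
#         uid=hmac_key_uid,
#         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
#     )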
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
            value (dict): A dictionary containing the key/value pairs for an
                EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
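# Illustrative usage sketch (not part of the client source): __enter__ and
# __exit__ above let the client act as a context manager, so open() and
# close() do not need to be called explicitly. The hostname and port below
# are hypothetical placeholders and a reachable KMIP server is assumed.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(hostname='kmip.example.com', port=5696) as client:
    # Create a 256-bit AES key, then fetch it back by its uid.
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    key = client.get(uid)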
|
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
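# Hedged usage sketch for the signature_verify() call above. The key uid and
# the message/signature bytes are placeholders; the enum members used for the
# cryptographic parameters are assumed to match the server's expectations.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    validity = client.signature_verify(
        b'signed message bytes',
        b'signature bytes',
        uid='<verification-key-uid>',
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
            'hashing_algorithm': enums.HashingAlgorithm.SHA_256
        }
    )
    if validity == enums.ValidityIndicator.VALID:
        print('signature verified')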
| 1,038 | 1,102 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations defining how the symmetric key may be used.
                Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
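# Hedged example of the create() call above: a named AES-256 key with an
# extended usage mask. The key name is a placeholder; connection settings are
# read from the default 'client' configuration section.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    uid = client.create(
        enums.CryptographicAlgorithm.AES,
        256,
        name='wrapping-key-1',
        cryptographic_usage_mask=[
            enums.CryptographicUsageMask.WRAP_KEY,
            enums.CryptographicUsageMask.UNWRAP_KEY
        ]
    )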
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
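# Hedged example of create_key_pair(): an RSA-2048 pair with distinct names
# and usage masks for each half. The names are placeholders and a reachable
# KMIP server is assumed.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    public_uid, private_uid = client.create_key_pair(
        enums.CryptographicAlgorithm.RSA,
        2048,
        public_name='rsa-public-1',
        private_name='rsa-private-1',
        public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
        private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    )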
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
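# Hedged example of register(): wraps raw key bytes in a Pie SymmetricKey and
# registers it. The 16-byte value is a placeholder and the SymmetricKey
# constructor arguments shown here are assumptions about the Pie object API.
from kmip.core import enums
from kmip.pie import objects as pobjects
from kmip.pie.client import ProxyKmipClient

key = pobjects.SymmetricKey(
    enums.CryptographicAlgorithm.AES,
    128,
    b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f',
    masks=[
        enums.CryptographicUsageMask.ENCRYPT,
        enums.CryptographicUsageMask.DECRYPT
    ]
)
with ProxyKmipClient(config='client') as client:
    uid = client.register(key)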
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                      | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
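# Hedged example of derive_key() using PBKDF2, following the parameter table
# in the docstring above. The base object uid, salt, and iteration count are
# placeholders; the DerivationMethod and HashingAlgorithm members are assumed.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    derived_uid = client.derive_key(
        enums.ObjectType.SYMMETRIC_KEY,
        ['<base-secret-uid>'],
        enums.DerivationMethod.PBKDF2,
        {
            'cryptographic_parameters': {
                'hashing_algorithm': enums.HashingAlgorithm.SHA_256
            },
            'salt': b'salt',
            'iteration_count': 10000
        },
        cryptographic_length=256,
        cryptographic_algorithm=enums.CryptographicAlgorithm.AES
    )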
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
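# Hedged example of locate(): filters candidate objects by cryptographic
# algorithm using the same AttributeFactory the client uses internally. The
# filter values are illustrative only.
from kmip.core import enums
from kmip.core.factories import attributes
from kmip.pie.client import ProxyKmipClient

attribute_factory = attributes.AttributeFactory()
filters = [
    attribute_factory.create_attribute(
        enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
        enums.CryptographicAlgorithm.AES
    )
]
with ProxyKmipClient(config='client') as client:
    uids = client.locate(maximum_items=10, attributes=filters)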
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
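# Hedged example of get() with a key wrapping specification, mirroring the
# dictionary layout documented above. Both uids are placeholders, and the
# WrappingMethod, BlockCipherMode, and EncodingOption members are assumptions.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    wrapped_key = client.get(
        '<key-to-retrieve-uid>',
        key_wrapping_specification={
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': {
                'unique_identifier': '<wrapping-key-uid>',
                'cryptographic_parameters': {
                    'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
                }
            },
            'encoding_option': enums.EncodingOption.NO_ENCODING
        }
    )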
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
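# Hedged example of the two attribute retrieval calls above. The uid is a
# placeholder for an existing managed object; the attribute names are
# standard KMIP attribute names.
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    names = client.get_attribute_list('<object-uid>')
    uid, attrs = client.get_attributes(
        '<object-uid>',
        attribute_names=['Cryptographic Algorithm', 'Cryptographic Length']
    )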
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
            compromise_occurrence_date (int): An integer, the number of seconds
                since the epoch, that will be converted to a DateTime indicating
                when the managed object was first believed to be compromised.
                Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
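# Hedged lifecycle sketch tying together activate(), revoke(), and destroy()
# above. The revocation reason is illustrative; servers typically require a
# key to be revoked or deactivated before it can be destroyed.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    client.activate(uid)
    client.revoke(
        enums.RevocationReasonCode.CESSATION_OF_OPERATION,
        uid=uid,
        revocation_message='rotation complete'
    )
    client.destroy(uid)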
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
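# Hedged example of encrypt() using the parameter dictionary documented
# above: AES-CBC with PKCS5 padding and a caller-supplied IV. The key uid and
# the IV bytes are placeholders.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    ciphertext, iv = client.encrypt(
        b'sixteen byte msg',
        uid='<aes-key-uid>',
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
            'block_cipher_mode': enums.BlockCipherMode.CBC,
            'padding_method': enums.PaddingMethod.PKCS5
        },
        iv_counter_nonce=b'\x00' * 16
    )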
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
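# Hedged companion to the encrypt() sketch above: decrypting ciphertext with
# the same key and parameters. The ciphertext, IV, and key uid are
# placeholders.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    plaintext = client.decrypt(
        b'<ciphertext bytes>',
        uid='<aes-key-uid>',
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
            'block_cipher_mode': enums.BlockCipherMode.CBC,
            'padding_method': enums.PaddingMethod.PKCS5
        },
        iv_counter_nonce=b'\x00' * 16
    )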
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
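# Hedged example of sign(): producing an RSA/SHA-256 signature over arbitrary
# bytes. The signing key uid is a placeholder and the enum members are
# assumed.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    signature = client.sign(
        b'data to sign',
        uid='<rsa-private-key-uid>',
        cryptographic_parameters={
            'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
            'hashing_algorithm': enums.HashingAlgorithm.SHA_256
        }
    )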
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
            uid (string): The unique ID of the managed object that is the key
                to use for the MAC operation.
            algorithm (CryptographicAlgorithm): An enumeration defining the
                algorithm to use to generate the MAC.
        Returns:
            string: The unique ID of the key used for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
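# Hedged example of mac(): an HMAC key with MAC-generation permission is
# assumed to already exist on the server; its uid is a placeholder, as is the
# HMAC_SHA256 algorithm choice.
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

with ProxyKmipClient(config='client') as client:
    key_uid, mac_bytes = client.mac(
        b'message bytes',
        uid='<hmac-key-uid>',
        algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    )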
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
            value (dict): A dictionary containing the key/value pairs for an
                EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
sign
|
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations defining how the symmetric key may be used.
                Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
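# Illustrative usage sketch (not part of the library): creating a 2048-bit
# RSA key pair with named public/private halves. The usage masks shown are
# an example choice, not a requirement of the API.
#
#     public_uid, private_uid = client.create_key_pair(
#         enums.CryptographicAlgorithm.RSA,
#         2048,
#         public_name='example-rsa-public',
#         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
#         private_name='example-rsa-private',
#         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
#     )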
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
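# Illustrative usage sketch (not part of the library): registering an
# existing 128-bit AES key value as a Pie SymmetricKey object. The
# SymmetricKey constructor arguments shown here are an assumption about
# the Pie API; consult kmip.pie.objects for the exact signature.
#
#     from kmip.pie import objects
#
#     key = objects.SymmetricKey(
#         enums.CryptographicAlgorithm.AES,
#         128,
#         b'\x00\x01\x02\x03\x04\x05\x06\x07'
#         b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f',
#         masks=[enums.CryptographicUsageMask.ENCRYPT,
#                enums.CryptographicUsageMask.DECRYPT]
#     )
#     key_uid = client.register(key)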
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
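# Illustrative usage sketch (not part of the library): deriving a 128-bit
# AES key from an existing key via PBKDF2, using the dictionary layout
# described in the docstring above. The base key UID, salt, and iteration
# count are example values only.
#
#     derived_uid = client.derive_key(
#         enums.ObjectType.SYMMETRIC_KEY,
#         [base_key_uid],
#         enums.DerivationMethod.PBKDF2,
#         {
#             'cryptographic_parameters': {
#                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
#             },
#             'salt': b'example-salt',
#             'iteration_count': 4096
#         },
#         cryptographic_length=128,
#         cryptographic_algorithm=enums.CryptographicAlgorithm.AES
#     )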
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
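# Illustrative usage sketch (not part of the library): locating objects by
# name. It builds a Name attribute with the same AttributeFactory used
# internally by this client; 'example-aes-key' is a placeholder value.
#
#     from kmip.core.factories import attributes
#
#     factory = attributes.AttributeFactory()
#     name_attribute = factory.create_attribute(
#         enums.AttributeType.NAME, 'example-aes-key')
#     matching_uids = client.locate(attributes=[name_attribute])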
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
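# Illustrative usage sketch (not part of the library): retrieving a key
# wrapped by another key, using the dictionary layout described in the
# docstring above. The wrapping-key UID and the parameter choices are
# placeholders.
#
#     wrapped_key = client.get(
#         key_uid,
#         key_wrapping_specification={
#             'wrapping_method': enums.WrappingMethod.ENCRYPT,
#             'encryption_key_information': {
#                 'unique_identifier': wrapping_key_uid,
#                 'cryptographic_parameters': {
#                     'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
#                 }
#             },
#             'encoding_option': enums.EncodingOption.NO_ENCODING
#         }
#     )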
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
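# Illustrative usage sketch (not part of the library): listing the
# attribute names set on an object, then fetching a subset of them. The
# attribute names shown are standard KMIP attribute names.
#
#     names = client.get_attribute_list(key_uid)
#     uid, attrs = client.get_attributes(
#         key_uid, ['Cryptographic Algorithm', 'Cryptographic Length'])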
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# Revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
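# Illustrative usage sketch (not part of the library): a typical object
# lifecycle, activating a key, revoking it once it is no longer needed,
# and finally destroying it. The revocation reason is an example choice.
#
#     client.activate(key_uid)
#     client.revoke(
#         enums.RevocationReasonCode.CESSATION_OF_OPERATION,
#         uid=key_uid,
#         revocation_message='example rotation'
#     )
#     client.destroy(key_uid)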
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
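# Illustrative usage sketch (not part of the library): AES-CBC encryption
# with PKCS#5 padding and a server-generated IV, using the dictionary
# layout described in the docstring above.
#
#     ciphertext, iv = client.encrypt(
#         b'example plaintext',
#         uid=key_uid,
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#             'block_cipher_mode': enums.BlockCipherMode.CBC,
#             'padding_method': enums.PaddingMethod.PKCS5,
#             'random_iv': True
#         }
#     )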
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
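# Illustrative usage sketch (not part of the library): decrypting the
# ciphertext produced in the encrypt example above, passing back the IV
# that the server generated.
#
#     plaintext = client.decrypt(
#         ciphertext,
#         uid=key_uid,
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
#             'block_cipher_mode': enums.BlockCipherMode.CBC,
#             'padding_method': enums.PaddingMethod.PKCS5
#         },
#         iv_counter_nonce=iv
#     )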
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Verify the signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
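# Illustrative usage sketch (not part of the library): verifying an RSA
# signature over a message with SHA-256, then checking the returned
# ValidityIndicator. The parameter choices are an example only.
#
#     validity = client.signature_verify(
#         message,
#         signature,
#         uid=public_key_uid,
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
#             'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
#             'padding_method': enums.PaddingMethod.PKCS1v15
#         }
#     )
#     if validity == enums.ValidityIndicator.VALID:
#         print('signature is valid')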
# MASKED: sign function (lines 1104-1157)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (bytes): The bytes to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
bytes: The MAC of the data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
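# Illustrative usage sketch (not part of the library): computing an
# HMAC-SHA256 over a payload with a previously created HMAC key. The key
# UID is a placeholder.
#
#     mac_uid, mac_value = client.mac(
#         b'example payload',
#         uid=hmac_key_uid,
#         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
#     )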
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
"""
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects.
"""
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
"""
Build a name attribute, returned in a list for ease
of use in the caller.
"""
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
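# Illustrative usage sketch (not part of the library): because __enter__
# and __exit__ call open() and close(), the client can be used as a
# context manager so the connection is always released.
#
#     with ProxyKmipClient(config='client') as client:
#         key_uid = client.create(enums.CryptographicAlgorithm.AES, 256)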
|
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
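# Illustrative usage sketch (not part of the library): signing a message
# with a private RSA key using SHA-256. The parameter choices mirror the
# signature_verify example and are placeholders.
#
#     signature = client.sign(
#         b'example message',
#         uid=private_key_uid,
#         cryptographic_parameters={
#             'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA,
#             'hashing_algorithm': enums.HashingAlgorithm.SHA_256,
#             'padding_method': enums.PaddingMethod.PKCS1v15
#         }
#     )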
| 1,104 | 1,157 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection")
raise
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection")
raise
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None.
name (string): The name to give the key. Optional, defaults to
None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations defining how the symmetric key may be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
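        Example:
            A minimal usage sketch. The object ID '1' and the timestamp are
            hypothetical placeholders; an open client connection (client)
            is assumed:
            >>> client.revoke(
            ...     enums.RevocationReasonCode.KEY_COMPROMISE,
            ...     uid='1',
            ...     revocation_message='Key disclosed to a third party.',
            ...     compromise_occurrence_date=1430000000)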
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
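        Example:
            A minimal usage sketch of AES/CBC encryption. The key ID '1' is
            a hypothetical placeholder; an open client connection (client)
            and a key with the ENCRYPT usage mask are assumed:
            >>> ciphertext, iv = client.encrypt(
            ...     b'secret data',
            ...     uid='1',
            ...     cryptographic_parameters={
            ...         'cryptographic_algorithm':
            ...             enums.CryptographicAlgorithm.AES,
            ...         'block_cipher_mode': enums.BlockCipherMode.CBC,
            ...         'padding_method': enums.PaddingMethod.PKCS5,
            ...         'random_iv': True
            ...     })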
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
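        Example:
            A minimal usage sketch of AES/CBC decryption, mirroring the
            encrypt example. The key ID '1', the ciphertext, and the IV are
            hypothetical placeholders; an open client connection (client)
            is assumed:
            >>> plaintext = client.decrypt(
            ...     ciphertext,
            ...     uid='1',
            ...     cryptographic_parameters={
            ...         'cryptographic_algorithm':
            ...             enums.CryptographicAlgorithm.AES,
            ...         'block_cipher_mode': enums.BlockCipherMode.CBC,
            ...         'padding_method': enums.PaddingMethod.PKCS5
            ...     },
            ...     iv_counter_nonce=iv)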
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
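        Example:
            A minimal usage sketch of RSA/PSS signature verification. The
            key ID '1', the message, and the signature are hypothetical
            placeholders; an open client connection (client) is assumed:
            >>> validity = client.signature_verify(
            ...     message,
            ...     signature,
            ...     uid='1',
            ...     cryptographic_parameters={
            ...         'padding_method': enums.PaddingMethod.PSS,
            ...         'cryptographic_algorithm':
            ...             enums.CryptographicAlgorithm.RSA,
            ...         'hashing_algorithm': enums.HashingAlgorithm.SHA_256
            ...     })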
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
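        Example:
            A minimal usage sketch of signing with an RSA private key. The
            key ID '1' is a hypothetical placeholder; an open client
            connection (client) is assumed:
            >>> signature = client.sign(
            ...     b'data to sign',
            ...     uid='1',
            ...     cryptographic_parameters={
            ...         'padding_method': enums.PaddingMethod.PKCS1v15,
            ...         'cryptographic_algorithm':
            ...             enums.CryptographicAlgorithm.RSA,
            ...         'hashing_algorithm': enums.HashingAlgorithm.SHA_256
            ...     })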
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
            string: The unique ID of the managed object that is the key
                used for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
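        Example:
            A minimal usage sketch. The key ID '1' is a hypothetical
            placeholder; an open client connection (client) and an
            HMAC-capable key are assumed:
            >>> key_id, mac_data = client.mac(
            ...     b'data to authenticate',
            ...     uid='1',
            ...     algorithm=enums.CryptographicAlgorithm.HMAC_SHA256)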
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
        Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
mac
|
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
            string: The unique ID of the managed object that is the key
                used for the MAC operation.
            bytes: The MACed data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
                file. Used to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
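        Example:
            A minimal construction sketch; the host, port, and certificate
            paths below are hypothetical placeholders:
            >>> client = ProxyKmipClient(
            ...     hostname='127.0.0.1',
            ...     port=5696,
            ...     cert='/path/to/client/certificate.pem',
            ...     key='/path/to/client/key.pem',
            ...     ca='/path/to/ca/certificate.pem',
            ...     ssl_version='PROTOCOL_SSLv23')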
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
            cryptographic_usage_mask (list): A list of CryptographicUsageMask
                enumerations specifying how the symmetric key should be used.
                Optional, defaults to None.
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
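        Example:
            A minimal usage sketch of creating a 256-bit AES key; the key
            name is a hypothetical placeholder and an open client
            connection (client) is assumed:
            >>> key_id = client.create(
            ...     enums.CryptographicAlgorithm.AES,
            ...     256,
            ...     name='example-aes-key',
            ...     cryptographic_usage_mask=[
            ...         enums.CryptographicUsageMask.ENCRYPT,
            ...         enums.CryptographicUsageMask.DECRYPT
            ...     ])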
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
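        Example:
            A minimal usage sketch of creating a 2048-bit RSA key pair; an
            open client connection (client) is assumed:
            >>> public_id, private_id = client.create_key_pair(
            ...     enums.CryptographicAlgorithm.RSA,
            ...     2048,
            ...     public_usage_mask=[
            ...         enums.CryptographicUsageMask.VERIFY
            ...     ],
            ...     private_usage_mask=[
            ...         enums.CryptographicUsageMask.SIGN
            ...     ])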
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
                instantiable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
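        Example:
            A minimal usage sketch of registering an externally generated
            128-bit AES key; the key bytes are a placeholder and an open
            client connection (client) is assumed:
            >>> from kmip.pie import objects
            >>> key = objects.SymmetricKey(
            ...     enums.CryptographicAlgorithm.AES,
            ...     128,
            ...     b'\x00' * 16)
            >>> key_id = client.register(key)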
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
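        Example:
            A minimal usage sketch of deriving a new 128-bit AES key via
            HMAC-SHA256; the base key ID '1', the derivation data, and the
            salt are hypothetical placeholders and an open client
            connection (client) is assumed:
            >>> derived_id = client.derive_key(
            ...     enums.ObjectType.SYMMETRIC_KEY,
            ...     ['1'],
            ...     enums.DerivationMethod.HMAC,
            ...     {
            ...         'cryptographic_parameters': {
            ...             'hashing_algorithm':
            ...                 enums.HashingAlgorithm.SHA_256
            ...         },
            ...         'derivation_data': b'\x01\x02\x03\x04',
            ...         'salt': b'salt'
            ...     },
            ...     cryptographic_length=128,
            ...     cryptographic_algorithm=enums.CryptographicAlgorithm.AES)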
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
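        Example:
            A minimal usage sketch of locating objects by name, using this
            client's attribute factory; the name is a hypothetical
            placeholder and an open client connection (client) is assumed:
            >>> name_attribute = client.attribute_factory.create_attribute(
            ...     enums.AttributeType.NAME,
            ...     'example-aes-key')
            >>> uids = client.locate(attributes=[name_attribute])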
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
# MASKED: mac function (lines 1159-1207)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
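    # Illustrative sketch (not part of the original source): the dictionary
    # passed through this helper by the public encrypt/decrypt/sign methods
    # mirrors the CryptographicParameters fields; for example, AES in CBC
    # mode with PKCS5 padding could be expressed as:
    #
    #     example_parameters = {
    #         'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #         'block_cipher_mode': enums.BlockCipherMode.CBC,
    #         'padding_method': enums.PaddingMethod.PKCS5
    #     }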
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
        Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MACed data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
| 1,159 | 1,207 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of enumerations of crypto
usage mask passing to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
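    # Illustrative sketch (not part of the original source): creating a
    # 256-bit AES key, assuming an open client connection; the name and
    # usage mask values below are examples only.
    #
    #     key_id = client.create(
    #         enums.CryptographicAlgorithm.AES,
    #         256,
    #         name='example-key',
    #         cryptographic_usage_mask=[
    #             enums.CryptographicUsageMask.ENCRYPT,
    #             enums.CryptographicUsageMask.DECRYPT
    #         ]
    #     )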
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
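    # Illustrative sketch (not part of the original source): creating a
    # 2048-bit RSA key pair, assuming an open client connection; the names
    # and usage masks below are examples only.
    #
    #     public_id, private_id = client.create_key_pair(
    #         enums.CryptographicAlgorithm.RSA,
    #         2048,
    #         public_name='example-public',
    #         public_usage_mask=[enums.CryptographicUsageMask.VERIFY],
    #         private_name='example-private',
    #         private_usage_mask=[enums.CryptographicUsageMask.SIGN]
    #     )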
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
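    # Illustrative sketch (not part of the original source): registering an
    # externally generated AES key via the Pie object model, assuming
    # `key_bytes` holds 16 raw key bytes; adjust the constructor arguments
    # to the installed PyKMIP version.
    #
    #     key = pobjects.SymmetricKey(
    #         enums.CryptographicAlgorithm.AES, 128, key_bytes
    #     )
    #     key_id = client.register(key)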
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                     | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
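    # Illustrative sketch (not part of the original source): deriving a new
    # symmetric key with PBKDF2 from an existing secret, assuming an open
    # connection; the uid, salt, and iteration count are examples only.
    #
    #     derived_id = client.derive_key(
    #         enums.ObjectType.SYMMETRIC_KEY,
    #         ['1'],
    #         enums.DerivationMethod.PBKDF2,
    #         {
    #             'cryptographic_parameters': {
    #                 'hashing_algorithm': enums.HashingAlgorithm.SHA_256
    #             },
    #             'salt': b'example-salt',
    #             'iteration_count': 10000
    #         },
    #         cryptographic_length=256,
    #         cryptographic_algorithm=enums.CryptographicAlgorithm.AES
    #     )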
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
            The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
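    # Illustrative sketch (not part of the original source): retrieving a key
    # wrapped by another key held on the server; the identifiers and
    # parameter choices below are examples only.
    #
    #     wrapped_key = client.get(
    #         uid='1',
    #         key_wrapping_specification={
    #             'wrapping_method': enums.WrappingMethod.ENCRYPT,
    #             'encryption_key_information': {
    #                 'unique_identifier': '2',
    #                 'cryptographic_parameters': {
    #                     'block_cipher_mode':
    #                         enums.BlockCipherMode.NIST_KEY_WRAP
    #                 }
    #             },
    #             'encoding_option': enums.EncodingOption.NO_ENCODING
    #         }
    #     )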
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
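    # Illustrative sketch (not part of the original source): encrypting data
    # with an AES key in CBC mode using PKCS5 padding, assuming an open
    # connection; the uid and IV value below are examples only.
    #
    #     ciphertext, iv = client.encrypt(
    #         b'example plaintext',
    #         uid='1',
    #         cryptographic_parameters={
    #             'cryptographic_algorithm': enums.CryptographicAlgorithm.AES,
    #             'block_cipher_mode': enums.BlockCipherMode.CBC,
    #             'padding_method': enums.PaddingMethod.PKCS5
    #         },
    #         iv_counter_nonce=b'\x00' * 16
    #     )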
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the message signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MACed data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
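    # Illustrative sketch (not part of the original source): computing an
    # HMAC over some data, assuming an open connection and an existing HMAC
    # key; the uid below is an example only.
    #
    #     key_id, mac_bytes = client.mac(
    #         b'example data',
    #         uid='1',
    #         algorithm=enums.CryptographicAlgorithm.HMAC_SHA256
    #     )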
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
        Build a MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
            MACSignatureKeyInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
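# Illustrative sketch (not part of the original source): the client is a
# context manager, so a typical interaction looks like the following; the
# hostname and port below are examples only.
#
#     with ProxyKmipClient(hostname='127.0.0.1', port=5696) as client:
#         key_id = client.create(enums.CryptographicAlgorithm.AES, 256)
#         client.activate(key_id)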
|
_make_api_call
|
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch requests for unit testing
|
"""
Lowest level connection
"""
from __future__ import division
import logging
import math
import random
import time
import uuid
import warnings
from base64 import b64decode
from threading import local
import six
from botocore.client import ClientError
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from botocore.vendored import requests
from botocore.vendored.requests import Request
from six.moves import range
from pynamodb.compat import NullHandler
from pynamodb.connection.util import pythonic
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES, COMPARISON_OPERATOR_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES, ATTR_UPDATE_ACTIONS,
COMPARISON_OPERATOR, EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, SCAN_FILTER_VALUES, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, ATTR_VALUE_LIST, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS, ATTR_UPDATES,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, SCAN_FILTER, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, EXPECTED, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, ACTION, EXISTS, VALUE, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, EQ, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, QUERY_FILTER, QUERY_FILTER_VALUES, CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS, NULL, NOT_NULL, SHORT_ATTR_TYPES, DELETE, PUT,
ITEMS, DEFAULT_ENCODING, BINARY_SHORT, BINARY_SET_SHORT, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED, UPDATE_EXPRESSION,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES, KEY_CONDITION_OPERATOR_MAP,
CONDITION_EXPRESSION, FILTER_EXPRESSION, FILTER_EXPRESSION_OPERATOR_MAP, NOT_CONTAINS, AND)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError
)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Update
from pynamodb.settings import get_settings_value
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class MetaTable(object):
"""
A pythonic wrapper around table metadata
"""
def __init__(self, data):
self.data = data or {}
self._range_keyname = None
self._hash_keyname = None
def __repr__(self):
if self.data:
return six.u("MetaTable<{0}>".format(self.data.get(TABLE_NAME)))
@property
def range_keyname(self):
"""
Returns the name of this table's range key
"""
if self._range_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == RANGE:
self._range_keyname = attr.get(ATTR_NAME)
return self._range_keyname
@property
def hash_keyname(self):
"""
Returns the name of this table's hash key
"""
if self._hash_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == HASH:
self._hash_keyname = attr.get(ATTR_NAME)
break
return self._hash_keyname
def get_key_names(self, index_name=None):
"""
Returns the names of the primary key attributes and index key attributes (if index_name is specified)
"""
key_names = [self.hash_keyname]
if self.range_keyname:
key_names.append(self.range_keyname)
if index_name is not None:
index_hash_keyname = self.get_index_hash_keyname(index_name)
if index_hash_keyname not in key_names:
key_names.append(index_hash_keyname)
index_range_keyname = self.get_index_range_keyname(index_name)
if index_range_keyname is not None and index_range_keyname not in key_names:
key_names.append(index_range_keyname)
return key_names
def get_index_hash_keyname(self, index_name):
"""
Returns the name of the hash key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == HASH:
return schema_key.get(ATTR_NAME)
def get_index_range_keyname(self, index_name):
"""
        Returns the name of the range key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == RANGE:
return schema_key.get(ATTR_NAME)
return None
def get_item_attribute_map(self, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
if pythonic_key:
item_key = item_key
attr_map = {
item_key: {}
}
for key, value in attributes.items():
# In this case, the user provided a mapping
# {'key': {'S': 'value'}}
if isinstance(value, dict):
attr_map[item_key][key] = value
else:
attr_map[item_key][key] = {
self.get_attribute_type(key): value
}
return attr_map
def get_attribute_type(self, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
"""
for attr in self.data.get(ATTR_DEFINITIONS):
if attr.get(ATTR_NAME) == attribute_name:
return attr.get(ATTR_TYPE)
if value is not None and isinstance(value, dict):
for key in SHORT_ATTR_TYPES:
if key in value:
return key
attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS)]
raise ValueError("No attribute {0} in {1}".format(attribute_name, attr_names))
def get_identifier_map(self, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
kwargs = {
key: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): hash_key
}
}
}
if range_key is not None:
kwargs[key][self.range_keyname] = {
self.get_attribute_type(self.range_keyname): range_key
}
return kwargs
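    # Illustrative sketch (assumes a hash key 'id' of type 'S' and a range key 'ts' of type 'N'):
    #   meta.get_identifier_map('123', range_key='456')
    # would produce {'Key': {'id': {'S': '123'}, 'ts': {'N': '456'}}}, the shape expected by
    # GetItem and DeleteItem; put_item reuses the same helper with key=ITEM.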
def get_exclusive_start_key_map(self, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
# This is useful when paginating results, as the LastEvaluatedKey returned is already
# structured properly
return {
EXCLUSIVE_START_KEY: exclusive_start_key
}
else:
return {
EXCLUSIVE_START_KEY: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): exclusive_start_key
}
}
}
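    # Illustrative sketch: a LastEvaluatedKey returned by a previous Query/Scan
    # (e.g. {'id': {'S': '123'}}) is passed back verbatim under 'ExclusiveStartKey',
    # while a bare hash key value such as '123' is first wrapped with its declared type.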
class Connection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, region=None, host=None, session_cls=None,
request_timeout_seconds=None, max_retry_attempts=None, base_backoff_ms=None):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value('session_cls')
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value('request_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
def __repr__(self):
return six.u("Connection<{0}>".format(self.client.meta.endpoint_url))
def _log_debug(self, operation, kwargs):
"""
Sends a debug message to the logger
"""
log.debug("Calling %s with arguments %s", operation, kwargs)
def _log_debug_response(self, operation, response):
"""
Sends a debug message to the logger about a response
"""
log.debug("%s response: %s", operation, response)
def _log_error(self, operation, response):
"""
Sends an error message to the logger
"""
log.error("%s failed with status: %s, message: %s",
                  operation, response.status_code, response.content)
def _create_prepared_request(self, request_dict, operation_model):
"""
Create a prepared request object from request_dict, and operation_model
"""
boto_prepared_request = self.client._endpoint.create_request(request_dict, operation_model)
        # The call requests_session.send(prepared_request) ignores the headers which are
# part of the request session. In order to include the requests session headers inside
# the request, we create a new request object, and call prepare_request with the newly
# created request object
raw_request_with_params = Request(
boto_prepared_request.method,
boto_prepared_request.url,
data=boto_prepared_request.body,
headers=boto_prepared_request.headers
)
return self.requests_session.prepare_request(raw_request_with_params)
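    # Illustrative sketch (hypothetical header): values configured on the requests session,
    # e.g. self.requests_session.headers.update({'X-Trace-Id': 'abc'}), only reach the wire
    # because prepare_request() is called here; sending botocore's prepared request directly
    # would drop them.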
def dispatch(self, operation_name, operation_kwargs):
"""
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
"""
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
self._log_debug(operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
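    # Illustrative sketch (hypothetical table name):
    #   conn = Connection(region='us-east-1')
    #   data = conn.dispatch(DESCRIBE_TABLE, {TABLE_NAME: 'UserTable'})
    # Item-level operations (everything outside the table-management list above) also get
    # ReturnConsumedCapacity=TOTAL injected, and the pre/post dynamodb_send signals fire
    # around the underlying API call.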
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
# MASKED: _make_api_call function (lines 335-418)
@staticmethod
def _handle_binary_attributes(data):
""" Simulate botocore's binary attribute handling """
if ITEM in data:
for attr in six.itervalues(data[ITEM]):
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if RESPONSES in data:
for item_list in six.itervalues(data[RESPONSES]):
for item in item_list:
for attr in six.itervalues(item):
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in six.itervalues(data[LAST_EVALUATED_KEY]):
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in six.itervalues(data[UNPROCESSED_KEYS]):
for item in table_data[KEYS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in six.itervalues(data[UNPROCESSED_ITEMS]):
for request in table_unprocessed_requests:
for item_mapping in six.itervalues(request):
for item in six.itervalues(item_mapping):
for attr in six.itervalues(item):
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in six.itervalues(data[ATTRIBUTES]):
_convert_binary(attr)
return data
@property
def session(self):
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def requests_session(self):
"""
Return a requests session to execute prepared requests using the same pool
"""
if self._requests_session is None:
self._requests_session = self.session_cls()
return self._requests_session
@property
def client(self):
"""
Returns a botocore dynamodb client
"""
# botocore has a known issue where it will cache empty credentials
# https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
# if the client does not have credentials, we create a new client
# otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
return self._client
def get_meta_table(self, table_name, refresh=False):
"""
Returns a MetaTable
"""
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {0}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(self,
table_name,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None,
stream_specification=None):
"""
Performs the CreateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get(pythonic(ATTR_NAME)),
ATTR_TYPE: attr.get(pythonic(ATTR_TYPE))
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
global_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
PROVISIONED_THROUGHPUT: index.get(pythonic(PROVISIONED_THROUGHPUT))
})
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get(pythonic(ATTR_NAME)),
KEY_TYPE: str(item.get(pythonic(KEY_TYPE))).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification[pythonic(STREAM_ENABLED)],
STREAM_VIEW_TYPE: stream_specification[pythonic(STREAM_VIEW_TYPE)]
}
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {0}".format(e), e)
return data
def delete_table(self, table_name):
"""
Performs the DeleteTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {0}".format(e), e)
return data
def update_table(self,
table_name,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
        if (read_capacity_units and not write_capacity_units) or (write_capacity_units and not read_capacity_units):
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get(pythonic(READ_CAPACITY_UNITS)),
WRITE_CAPACITY_UNITS: index.get(pythonic(WRITE_CAPACITY_UNITS))
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {0}".format(e), e)
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Performs the ListTables operation
"""
operation_kwargs = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {0}".format(e), e)
def describe_table(self, table_name):
"""
Performs the DescribeTable operation
"""
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_conditional_operator(self, operator):
"""
Returns a dictionary containing the correct conditional operator,
validating it first.
"""
operator = operator.upper()
if operator not in CONDITIONAL_OPERATORS:
raise ValueError(
"The {0} must be one of {1}".format(
CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS
)
)
return {
CONDITIONAL_OPERATOR: operator
}
def get_item_attribute_map(self, table_name, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def get_expected_map(self, table_name, expected):
"""
Builds the expected map that is common to several operations
"""
kwargs = {EXPECTED: {}}
for key, condition in expected.items():
if EXISTS in condition:
kwargs[EXPECTED][key] = {
EXISTS: condition.get(EXISTS)
}
elif VALUE in condition:
kwargs[EXPECTED][key] = {
VALUE: {
self.get_attribute_type(table_name, key): condition.get(VALUE)
}
}
elif COMPARISON_OPERATOR in condition:
kwargs[EXPECTED][key] = {
COMPARISON_OPERATOR: condition.get(COMPARISON_OPERATOR),
}
values = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_type = self.get_attribute_type(table_name, key, value)
values.append({attr_type: self.parse_attribute(value)})
if condition.get(COMPARISON_OPERATOR) not in [NULL, NOT_NULL]:
kwargs[EXPECTED][key][ATTR_VALUE_LIST] = values
return kwargs
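    # Illustrative sketch (assumes attribute 'views' is declared with type 'N' and the usual
    # DynamoDB field names): expected = {'views': {'ComparisonOperator': 'GT', 'AttributeValueList': [5]}}
    # serializes to roughly {'Expected': {'views': {'ComparisonOperator': 'GT',
    # 'AttributeValueList': [{'N': 5}]}}}, while {'views': {'Exists': False}} passes through as-is.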
def parse_attribute(self, attribute, return_type=False):
"""
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
"""
if isinstance(attribute, dict):
for key in SHORT_ATTR_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {0}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
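    # Illustrative sketch:
    #   parse_attribute({'S': 'foo'})                   -> 'foo'
    #   parse_attribute({'S': 'foo'}, return_type=True) -> ('S', 'foo')
    #   parse_attribute('foo', return_type=True)        -> (None, 'foo')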
def get_attribute_type(self, table_name, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
        :param value: The attribute value can be supplied in case the type is already included
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(self, table_name, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_query_filter_map(self, table_name, query_filters):
"""
Builds the QueryFilter object needed for the Query operation
"""
kwargs = {
QUERY_FILTER: {}
}
for key, condition in query_filters.items():
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
attr_value_list = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_value_list.append({
self.get_attribute_type(table_name, key, value): self.parse_attribute(value)
})
kwargs[QUERY_FILTER][key] = {
COMPARISON_OPERATOR: operator
}
if len(attr_value_list):
kwargs[QUERY_FILTER][key][ATTR_VALUE_LIST] = attr_value_list
return kwargs
def get_consumed_capacity_map(self, return_consumed_capacity):
"""
Builds the consumed capacity map that is common to several operations
"""
if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY_VALUES))
return {
RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
}
def get_return_values_map(self, return_values):
"""
Builds the return values map that is common to several operations
"""
if return_values.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
return {
RETURN_VALUES: str(return_values).upper()
}
def get_item_collection_map(self, return_item_collection_metrics):
"""
Builds the item collection map
"""
if return_item_collection_metrics.upper() not in RETURN_ITEM_COLL_METRICS_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
return {
RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()
}
def get_exclusive_start_key_map(self, table_name, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_exclusive_start_key_map(exclusive_start_key)
def delete_item(self,
table_name,
hash_key,
range_key=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the DeleteItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(DELETE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise DeleteError("Failed to delete item: {0}".format(e), e)
def update_item(self,
table_name,
hash_key,
range_key=None,
actions=None,
attribute_updates=None,
condition=None,
expected=None,
return_consumed_capacity=None,
conditional_operator=None,
return_item_collection_metrics=None,
return_values=None):
"""
Performs the UpdateItem operation
"""
self._check_actions(actions, attribute_updates)
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if not actions and not attribute_updates:
raise ValueError("{0} cannot be empty".format(ATTR_UPDATES))
actions = actions or []
attribute_updates = attribute_updates or {}
update_expression = Update(*actions)
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(attribute_updates.keys()):
path = Path([key])
update = attribute_updates[key]
action = update.get(ACTION)
if action not in ATTR_UPDATE_ACTIONS:
raise ValueError("{0} must be one of {1}".format(ACTION, ATTR_UPDATE_ACTIONS))
value = update.get(VALUE)
attr_type, value = self.parse_attribute(value, return_type=True)
if attr_type is None and action != DELETE:
attr_type = self.get_attribute_type(table_name, key, value)
value = {attr_type: value}
if action == DELETE:
action = path.remove() if attr_type is None else path.delete(value)
elif action == PUT:
action = path.set(value)
else:
action = path.add(value)
update_expression.add_action(action)
operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(name_placeholders, expression_attribute_values)
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(UPDATE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise UpdateError("Failed to update item: {0}".format(e), e)
def put_item(self,
table_name,
hash_key,
range_key=None,
attributes=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the PutItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
name_placeholders = {}
expression_attribute_values = {}
if attributes:
attrs = self.get_item_attribute_map(table_name, attributes)
operation_kwargs[ITEM].update(attrs[ITEM])
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(PUT_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to put item: {0}".format(e), e)
def batch_write_item(self,
table_name,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the batch_write_item operation
"""
if put_items is None and delete_items is None:
raise ValueError("Either put_items or delete_items must be specified")
operation_kwargs = {
REQUEST_ITEMS: {
table_name: []
}
}
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
put_items_list = []
if put_items:
for item in put_items:
put_items_list.append({
PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)
})
delete_items_list = []
if delete_items:
for item in delete_items:
delete_items_list.append({
DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)
})
operation_kwargs[REQUEST_ITEMS][table_name] = delete_items_list + put_items_list
try:
return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to batch write items: {0}".format(e), e)
def batch_get_item(self,
table_name,
keys,
consistent_read=None,
return_consumed_capacity=None,
attributes_to_get=None):
"""
Performs the batch get item operation
"""
operation_kwargs = {
REQUEST_ITEMS: {
table_name: {}
}
}
args_map = {}
name_placeholders = {}
if consistent_read:
args_map[CONSISTENT_READ] = consistent_read
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
args_map[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
keys_map = {KEYS: []}
for key in keys:
keys_map[KEYS].append(
self.get_item_attribute_map(table_name, key)[ITEM]
)
operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
try:
return self.dispatch(BATCH_GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to batch get items: {0}".format(e), e)
def get_item(self,
table_name,
hash_key,
range_key=None,
consistent_read=False,
attributes_to_get=None):
"""
Performs the GetItem operation and returns the result
"""
operation_kwargs = {}
name_placeholders = {}
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[CONSISTENT_READ] = consistent_read
operation_kwargs[TABLE_NAME] = table_name
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
try:
return self.dispatch(GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to get item: {0}".format(e), e)
def rate_limited_scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
page_size=None,
limit=None,
conditional_operator=None,
scan_filter=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
timeout_seconds=None,
read_capacity_to_consume_per_second=10,
allow_rate_limited_scan_without_consumed_capacity=None,
max_sleep_between_retry=10,
max_consecutive_exceptions=10,
consistent_read=None,
index_name=None):
"""
        Performs a rate limited scan on the table. This method uses the Scan API to fetch items from
        DynamoDB. The 'ConsumedCapacity' value returned by DynamoDB is used to limit the rate of the
        scan, and 'ProvisionedThroughputExceededException' is handled and retried.
:param table_name: Name of the table to perform scan on.
:param filter_condition: Condition used to restrict the scan results
:param attributes_to_get: A list of attributes to return.
:param page_size: Page size of the scan to DynamoDB
:param limit: Used to limit the number of results returned
        :param conditional_operator: Legacy conditional operator (AND or OR) applied to the scan_filter conditions
:param scan_filter: A map indicating the condition that evaluates the scan results
:param exclusive_start_key: If set, provides the starting point for scan.
:param segment: If set, then scans the segment
:param total_segments: If set, then specifies total segments
:param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running
            indefinitely
:param read_capacity_to_consume_per_second: Amount of read capacity to consume
every second
:param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if
the server does not support returning consumed capacity in responses.
:param max_sleep_between_retry: Max value for sleep in seconds in between scans during
throttling/rate limit scenarios
:param max_consecutive_exceptions: Max number of consecutive ProvisionedThroughputExceededException
            exceptions allowed before the scan exits
:param consistent_read: enable consistent read
:param index_name: an index to perform the scan on
"""
read_capacity_to_consume_per_ms = float(read_capacity_to_consume_per_second) / 1000
if allow_rate_limited_scan_without_consumed_capacity is None:
allow_rate_limited_scan_without_consumed_capacity = get_settings_value(
'allow_rate_limited_scan_without_consumed_capacity'
)
total_consumed_read_capacity = 0.0
last_evaluated_key = exclusive_start_key
rate_available = True
latest_scan_consumed_capacity = 0
consecutive_provision_throughput_exceeded_ex = 0
start_time = time.time()
if page_size is None:
if limit and read_capacity_to_consume_per_second > limit:
page_size = limit
else:
page_size = read_capacity_to_consume_per_second
while True:
if rate_available:
try:
data = self.scan(
table_name,
filter_condition=filter_condition,
attributes_to_get=attributes_to_get,
exclusive_start_key=last_evaluated_key,
limit=page_size,
conditional_operator=conditional_operator,
return_consumed_capacity=TOTAL,
scan_filter=scan_filter,
segment=segment,
total_segments=total_segments,
consistent_read=consistent_read,
index_name=index_name
)
for item in data.get(ITEMS):
yield item
if limit is not None:
limit -= 1
if not limit:
return
if CONSUMED_CAPACITY in data:
latest_scan_consumed_capacity = data.get(CONSUMED_CAPACITY).get(CAPACITY_UNITS)
else:
if allow_rate_limited_scan_without_consumed_capacity:
latest_scan_consumed_capacity = 0
else:
                            raise ScanError('Rate limited scan not possible because the server did not send back '
                                            'consumed capacity information. If you wish scans to complete anyway '
                                            'without functioning rate limiting, set '
                                            'allow_rate_limited_scan_without_consumed_capacity to True in settings.')
last_evaluated_key = data.get(LAST_EVALUATED_KEY, None)
consecutive_provision_throughput_exceeded_ex = 0
except ScanError as e:
# Only retry if provision throughput is exceeded.
if isinstance(e.cause, ClientError):
code = e.cause.response['Error'].get('Code')
if code == "ProvisionedThroughputExceededException":
consecutive_provision_throughput_exceeded_ex += 1
if consecutive_provision_throughput_exceeded_ex > max_consecutive_exceptions:
# Max threshold reached
raise
else:
# Different exception, other than ProvisionedThroughputExceededException
raise
else:
# Not a Client error
raise
# No throttling, and no more scans needed. Just return
if not last_evaluated_key and consecutive_provision_throughput_exceeded_ex == 0:
return
current_time = time.time()
# elapsed_time_ms indicates the time taken in ms from the start of the
            # rate_limited_scan call.
elapsed_time_ms = max(1, round((current_time - start_time) * 1000))
if consecutive_provision_throughput_exceeded_ex == 0:
total_consumed_read_capacity += latest_scan_consumed_capacity
consumed_rate = total_consumed_read_capacity / elapsed_time_ms
rate_available = (read_capacity_to_consume_per_ms - consumed_rate) >= 0
# consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred.
# ProvisionedThroughputExceededException can occur if:
# - The rate to consume is passed incorrectly.
# - External factors, even if the current scan is within limits.
if not rate_available or (consecutive_provision_throughput_exceeded_ex > 0):
# Minimum value is 1 second.
elapsed_time_s = math.ceil(elapsed_time_ms / 1000)
# Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume--
                time_to_sleep = max(1, round((total_consumed_read_capacity / elapsed_time_s)
                                             / read_capacity_to_consume_per_second))
# At any moment if the timeout_seconds hits, then return
if timeout_seconds and (elapsed_time_s + time_to_sleep) > timeout_seconds:
raise ScanError("Input timeout value {0} has expired".format(timeout_seconds))
time.sleep(min(math.ceil(time_to_sleep), max_sleep_between_retry))
# Reset the latest_scan_consumed_capacity, as no scan operation was performed.
latest_scan_consumed_capacity = 0
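    # Illustrative sketch (hypothetical table name and capacity budget):
    #   conn = Connection(region='us-east-1')
    #   for item in conn.rate_limited_scan('UserTable', read_capacity_to_consume_per_second=5):
    #       process(item)
    # The generator sleeps whenever the consumed capacity per elapsed millisecond exceeds
    # read_capacity_to_consume_per_second / 1000, keeping throughput near the requested budget.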
def scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
limit=None,
conditional_operator=None,
scan_filter=None,
return_consumed_capacity=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
consistent_read=None,
index_name=None):
"""
Performs the scan operation
"""
self._check_condition('filter_condition', filter_condition, scan_filter, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if segment is not None:
operation_kwargs[SEGMENT] = segment
if total_segments:
operation_kwargs[TOTAL_SEGMENTS] = total_segments
if scan_filter:
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
filter_expression = self._get_filter_expression(
table_name, scan_filter, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = consistent_read
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(SCAN, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise ScanError("Failed to scan table: {0}".format(e), e)
def query(self,
table_name,
hash_key,
range_key_condition=None,
filter_condition=None,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
query_filters=None,
conditional_operator=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None):
"""
Performs the Query operation and returns the result
"""
self._check_condition('range_key_condition', range_key_condition, key_conditions, conditional_operator)
self._check_condition('filter_condition', filter_condition, query_filters, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table: {0}".format(table_name))
if index_name:
hash_keyname = tbl.get_index_hash_keyname(index_name)
if not hash_keyname:
raise ValueError("No hash key attribute for index: {0}".format(index_name))
range_keyname = tbl.get_index_range_keyname(index_name)
else:
hash_keyname = tbl.hash_keyname
range_keyname = tbl.range_keyname
key_condition = self._get_condition(table_name, hash_keyname, '__eq__', hash_key)
if range_key_condition is not None:
if range_key_condition.is_valid_range_key_condition(range_keyname):
key_condition = key_condition & range_key_condition
elif filter_condition is None:
# Try to gracefully handle the case where a user passed in a filter as a range key condition
(filter_condition, range_key_condition) = (range_key_condition, None)
else:
raise ValueError("{0} is not a valid range key condition".format(range_key_condition))
if key_conditions is None or len(key_conditions) == 0:
pass # No comparisons on sort key
elif len(key_conditions) > 1:
raise ValueError("Multiple attributes are not supported in key_conditions: {0}".format(key_conditions))
else:
(key, condition), = key_conditions.items()
operator = condition.get(COMPARISON_OPERATOR)
if operator not in COMPARISON_OPERATOR_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, COMPARISON_OPERATOR_VALUES))
operator = KEY_CONDITION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST)
sort_key_expression = self._get_condition(table_name, key, operator, *values)
key_condition = key_condition & sort_key_expression
operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
name_placeholders, expression_attribute_values)
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
# FilterExpression does not allow key attributes. Check for hash and range key name placeholders
hash_key_placeholder = name_placeholders.get(hash_keyname)
range_key_placeholder = range_keyname and name_placeholders.get(range_keyname)
if (
hash_key_placeholder in filter_expression or
(range_key_placeholder and range_key_placeholder in filter_expression)
):
raise ValueError("'filter_condition' cannot contain key attributes")
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = True
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
# We read the conditional operator even without a query filter passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if query_filters:
filter_expression = self._get_filter_expression(
table_name, query_filters, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if select:
if select.upper() not in SELECT_VALUES:
raise ValueError("{0} must be one of {1}".format(SELECT, SELECT_VALUES))
operation_kwargs[SELECT] = str(select).upper()
if scan_index_forward is not None:
operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(QUERY, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise QueryError("Failed to query items: {0}".format(e), e)
def _get_condition_expression(self, table_name, expected, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the ConditionExpression needed for DeleteItem, PutItem, and UpdateItem operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(expected.keys()):
condition = expected[key]
if EXISTS in condition:
operator = NOT_NULL if condition.get(EXISTS, True) else NULL
values = []
elif VALUE in condition:
operator = EQ
values = [condition.get(VALUE)]
else:
operator = condition.get(COMPARISON_OPERATOR)
values = condition.get(ATTR_VALUE_LIST, [])
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_filter_expression(self, table_name, filters, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the FilterExpression needed for Query and Scan operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(filters.keys()):
condition = filters[key]
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST, [])
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_condition(self, table_name, attribute_name, operator, *values):
values = [
{self.get_attribute_type(table_name, attribute_name, value): self.parse_attribute(value)}
for value in values
]
return getattr(Path([attribute_name]), operator)(*values)
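    # Illustrative sketch (assumes 'age' is declared with type 'N'; '__ge__' stands in for
    # whatever operator string the caller maps to):
    #   self._get_condition('UserTable', 'age', '__ge__', 18)
    # builds roughly getattr(Path(['age']), '__ge__')({'N': 18}), i.e. the condition used by
    # the legacy filter and expected-map translation above.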
def _check_actions(self, actions, attribute_updates):
if actions is not None:
if attribute_updates is not None:
raise ValueError("Legacy attribute updates cannot be used with update actions")
else:
if attribute_updates is not None:
warnings.warn("Legacy attribute updates are deprecated in favor of update actions")
def _check_condition(self, name, condition, expected_or_filter, conditional_operator):
if condition is not None:
if not isinstance(condition, Condition):
raise ValueError("'{0}' must be an instance of Condition".format(name))
if expected_or_filter or conditional_operator is not None:
raise ValueError("Legacy conditional parameters cannot be used with condition expressions")
else:
if expected_or_filter or conditional_operator is not None:
warnings.warn("Legacy conditional parameters are deprecated in favor of condition expressions")
@staticmethod
def _reverse_dict(d):
return dict((v, k) for k, v in six.iteritems(d))
def _convert_binary(attr):
if BINARY_SHORT in attr:
attr[BINARY_SHORT] = b64decode(attr[BINARY_SHORT].encode(DEFAULT_ENCODING))
elif BINARY_SET_SHORT in attr:
value = attr[BINARY_SET_SHORT]
if value and len(value):
attr[BINARY_SET_SHORT] = set(b64decode(v.encode(DEFAULT_ENCODING)) for v in value)
|
def _make_api_call(self, operation_name, operation_kwargs):
"""
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch requests for unit testing
"""
operation_model = self.client._service_model.operation_model(operation_name)
request_dict = self.client._convert_to_request_dict(
operation_kwargs,
operation_model
)
prepared_request = self._create_prepared_request(request_dict, operation_model)
for i in range(0, self._max_retry_attempts_exception + 1):
attempt_number = i + 1
is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
try:
response = self.requests_session.send(
prepared_request,
timeout=self._request_timeout_seconds,
proxies=self.client._endpoint.proxies,
)
data = response.json()
except (requests.RequestException, ValueError) as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
else:
# No backoff for fast-fail exceptions that likely failed at the frontend
log.debug(
'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
operation_name,
attempt_number,
e.__class__.__name__,
e
)
continue
if response.status_code >= 300:
# Extract error code from __type
code = data.get('__type', '')
if '#' in code:
code = code.rsplit('#', 1)[1]
botocore_expected_format = {'Error': {'Message': data.get('message', ''), 'Code': code}}
verbose_properties = {
'request_id': response.headers.get('x-amzn-RequestId')
}
if 'RequestItems' in operation_kwargs:
# Batch operations can hit multiple tables, report them comma separated
verbose_properties['table_name'] = ','.join(operation_kwargs['RequestItems'])
else:
verbose_properties['table_name'] = operation_kwargs.get('TableName')
try:
raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
except VerboseClientError as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
elif response.status_code < 500 and code != 'ProvisionedThroughputExceededException':
# We don't retry on a ConditionalCheckFailedException or other 4xx (except for
# throughput related errors) because we assume they will fail in perpetuity.
# Retrying when there is already contention could cause other problems
# in part due to unnecessary consumption of throughput.
raise
else:
# We use fully-jittered exponentially-backed-off retries:
# https://www.awsarchitectureblog.com/2015/03/backoff.html
sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
log.debug(
                            'Retry with backoff needed for (%s) after attempt %s, '
                            'sleeping for %s milliseconds, retryable %s caught: %s',
operation_name,
attempt_number,
sleep_time_ms,
e.__class__.__name__,
e
)
time.sleep(sleep_time_ms / 1000.0)
continue
return self._handle_binary_attributes(data)
| 335 | 418 |
"""
Lowest level connection
"""
from __future__ import division
import logging
import math
import random
import time
import uuid
import warnings
from base64 import b64decode
from threading import local
import six
from botocore.client import ClientError
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from botocore.vendored import requests
from botocore.vendored.requests import Request
from six.moves import range
from pynamodb.compat import NullHandler
from pynamodb.connection.util import pythonic
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES, COMPARISON_OPERATOR_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES, ATTR_UPDATE_ACTIONS,
COMPARISON_OPERATOR, EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, SCAN_FILTER_VALUES, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, ATTR_VALUE_LIST, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS, ATTR_UPDATES,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, SCAN_FILTER, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, EXPECTED, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, ACTION, EXISTS, VALUE, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, EQ, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, QUERY_FILTER, QUERY_FILTER_VALUES, CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS, NULL, NOT_NULL, SHORT_ATTR_TYPES, DELETE, PUT,
ITEMS, DEFAULT_ENCODING, BINARY_SHORT, BINARY_SET_SHORT, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED, UPDATE_EXPRESSION,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES, KEY_CONDITION_OPERATOR_MAP,
CONDITION_EXPRESSION, FILTER_EXPRESSION, FILTER_EXPRESSION_OPERATOR_MAP, NOT_CONTAINS, AND)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError
)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Update
from pynamodb.settings import get_settings_value
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class MetaTable(object):
"""
A pythonic wrapper around table metadata
"""
def __init__(self, data):
self.data = data or {}
self._range_keyname = None
self._hash_keyname = None
def __repr__(self):
if self.data:
return six.u("MetaTable<{0}>".format(self.data.get(TABLE_NAME)))
@property
def range_keyname(self):
"""
Returns the name of this table's range key
"""
if self._range_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == RANGE:
self._range_keyname = attr.get(ATTR_NAME)
return self._range_keyname
@property
def hash_keyname(self):
"""
Returns the name of this table's hash key
"""
if self._hash_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == HASH:
self._hash_keyname = attr.get(ATTR_NAME)
break
return self._hash_keyname
def get_key_names(self, index_name=None):
"""
Returns the names of the primary key attributes and index key attributes (if index_name is specified)
"""
key_names = [self.hash_keyname]
if self.range_keyname:
key_names.append(self.range_keyname)
if index_name is not None:
index_hash_keyname = self.get_index_hash_keyname(index_name)
if index_hash_keyname not in key_names:
key_names.append(index_hash_keyname)
index_range_keyname = self.get_index_range_keyname(index_name)
if index_range_keyname is not None and index_range_keyname not in key_names:
key_names.append(index_range_keyname)
return key_names
def get_index_hash_keyname(self, index_name):
"""
Returns the name of the hash key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == HASH:
return schema_key.get(ATTR_NAME)
def get_index_range_keyname(self, index_name):
"""
        Returns the name of the range key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == RANGE:
return schema_key.get(ATTR_NAME)
return None
def get_item_attribute_map(self, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
if pythonic_key:
item_key = item_key
attr_map = {
item_key: {}
}
for key, value in attributes.items():
# In this case, the user provided a mapping
# {'key': {'S': 'value'}}
if isinstance(value, dict):
attr_map[item_key][key] = value
else:
attr_map[item_key][key] = {
self.get_attribute_type(key): value
}
return attr_map
def get_attribute_type(self, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
"""
for attr in self.data.get(ATTR_DEFINITIONS):
if attr.get(ATTR_NAME) == attribute_name:
return attr.get(ATTR_TYPE)
if value is not None and isinstance(value, dict):
for key in SHORT_ATTR_TYPES:
if key in value:
return key
attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS)]
raise ValueError("No attribute {0} in {1}".format(attribute_name, attr_names))
def get_identifier_map(self, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
kwargs = {
key: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): hash_key
}
}
}
if range_key is not None:
kwargs[key][self.range_keyname] = {
self.get_attribute_type(self.range_keyname): range_key
}
return kwargs
def get_exclusive_start_key_map(self, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
# This is useful when paginating results, as the LastEvaluatedKey returned is already
# structured properly
return {
EXCLUSIVE_START_KEY: exclusive_start_key
}
else:
return {
EXCLUSIVE_START_KEY: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): exclusive_start_key
}
}
}
class Connection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, region=None, host=None, session_cls=None,
request_timeout_seconds=None, max_retry_attempts=None, base_backoff_ms=None):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value('session_cls')
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value('request_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
def __repr__(self):
return six.u("Connection<{0}>".format(self.client.meta.endpoint_url))
def _log_debug(self, operation, kwargs):
"""
Sends a debug message to the logger
"""
log.debug("Calling %s with arguments %s", operation, kwargs)
def _log_debug_response(self, operation, response):
"""
Sends a debug message to the logger about a response
"""
log.debug("%s response: %s", operation, response)
def _log_error(self, operation, response):
"""
Sends an error message to the logger
"""
log.error("%s failed with status: %s, message: %s",
                  operation, response.status_code, response.content)
def _create_prepared_request(self, request_dict, operation_model):
"""
Create a prepared request object from request_dict, and operation_model
"""
boto_prepared_request = self.client._endpoint.create_request(request_dict, operation_model)
        # The call requests_session.send(prepared_request) ignores the headers which are
# part of the request session. In order to include the requests session headers inside
# the request, we create a new request object, and call prepare_request with the newly
# created request object
raw_request_with_params = Request(
boto_prepared_request.method,
boto_prepared_request.url,
data=boto_prepared_request.body,
headers=boto_prepared_request.headers
)
return self.requests_session.prepare_request(raw_request_with_params)
def dispatch(self, operation_name, operation_kwargs):
"""
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
"""
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
self._log_debug(operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
def _make_api_call(self, operation_name, operation_kwargs):
"""
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch requests for unit testing
"""
operation_model = self.client._service_model.operation_model(operation_name)
request_dict = self.client._convert_to_request_dict(
operation_kwargs,
operation_model
)
prepared_request = self._create_prepared_request(request_dict, operation_model)
for i in range(0, self._max_retry_attempts_exception + 1):
attempt_number = i + 1
is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
try:
response = self.requests_session.send(
prepared_request,
timeout=self._request_timeout_seconds,
proxies=self.client._endpoint.proxies,
)
data = response.json()
except (requests.RequestException, ValueError) as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
else:
# No backoff for fast-fail exceptions that likely failed at the frontend
log.debug(
'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
operation_name,
attempt_number,
e.__class__.__name__,
e
)
continue
if response.status_code >= 300:
# Extract error code from __type
code = data.get('__type', '')
if '#' in code:
code = code.rsplit('#', 1)[1]
botocore_expected_format = {'Error': {'Message': data.get('message', ''), 'Code': code}}
verbose_properties = {
'request_id': response.headers.get('x-amzn-RequestId')
}
if 'RequestItems' in operation_kwargs:
# Batch operations can hit multiple tables, report them comma separated
verbose_properties['table_name'] = ','.join(operation_kwargs['RequestItems'])
else:
verbose_properties['table_name'] = operation_kwargs.get('TableName')
try:
raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
except VerboseClientError as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
elif response.status_code < 500 and code != 'ProvisionedThroughputExceededException':
# We don't retry on a ConditionalCheckFailedException or other 4xx (except for
# throughput related errors) because we assume they will fail in perpetuity.
# Retrying when there is already contention could cause other problems
# in part due to unnecessary consumption of throughput.
raise
else:
# We use fully-jittered exponentially-backed-off retries:
# https://www.awsarchitectureblog.com/2015/03/backoff.html
sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
log.debug(
                            'Retry with backoff needed for (%s) after attempt %s, '
'sleeping for %s milliseconds, retryable %s caught: %s',
operation_name,
attempt_number,
sleep_time_ms,
e.__class__.__name__,
e
)
time.sleep(sleep_time_ms / 1000.0)
continue
return self._handle_binary_attributes(data)
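    # A worked illustration of the full-jitter backoff used above (comment only;
    # the 25 ms base is a hypothetical value, not taken from this module):
    # each retry sleeps random.randint(0, base_backoff_ms * 2 ** i) milliseconds,
    # so with base_backoff_ms=25 the upper bounds for i = 0, 1, 2, 3 are
    # 25, 50, 100 and 200 ms, while the actual sleep stays uniformly random
    # within that window to keep concurrent retries de-correlated.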
@staticmethod
def _handle_binary_attributes(data):
""" Simulate botocore's binary attribute handling """
if ITEM in data:
for attr in six.itervalues(data[ITEM]):
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if RESPONSES in data:
for item_list in six.itervalues(data[RESPONSES]):
for item in item_list:
for attr in six.itervalues(item):
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in six.itervalues(data[LAST_EVALUATED_KEY]):
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in six.itervalues(data[UNPROCESSED_KEYS]):
for item in table_data[KEYS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in six.itervalues(data[UNPROCESSED_ITEMS]):
for request in table_unprocessed_requests:
for item_mapping in six.itervalues(request):
for item in six.itervalues(item_mapping):
for attr in six.itervalues(item):
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in six.itervalues(data[ATTRIBUTES]):
_convert_binary(attr)
return data
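    # Illustration of the conversion above (values are made up): a raw response
    # attribute {'B': 'aGVsbG8='} becomes {'B': b'hello'}, and a binary set
    # {'BS': ['aGVsbG8=']} becomes {'BS': {b'hello'}}, matching what botocore's
    # response parser would normally return.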
@property
def session(self):
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def requests_session(self):
"""
Return a requests session to execute prepared requests using the same pool
"""
if self._requests_session is None:
self._requests_session = self.session_cls()
return self._requests_session
@property
def client(self):
"""
Returns a botocore dynamodb client
"""
# botocore has a known issue where it will cache empty credentials
# https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
# if the client does not have credentials, we create a new client
# otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
return self._client
def get_meta_table(self, table_name, refresh=False):
"""
Returns a MetaTable
"""
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {0}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(self,
table_name,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None,
stream_specification=None):
"""
Performs the CreateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get(pythonic(ATTR_NAME)),
ATTR_TYPE: attr.get(pythonic(ATTR_TYPE))
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
global_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
PROVISIONED_THROUGHPUT: index.get(pythonic(PROVISIONED_THROUGHPUT))
})
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get(pythonic(ATTR_NAME)),
KEY_TYPE: str(item.get(pythonic(KEY_TYPE))).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification[pythonic(STREAM_ENABLED)],
STREAM_VIEW_TYPE: stream_specification[pythonic(STREAM_VIEW_TYPE)]
}
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {0}".format(e), e)
return data
def delete_table(self, table_name):
"""
Performs the DeleteTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {0}".format(e), e)
return data
def update_table(self,
table_name,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
        if ((read_capacity_units and not write_capacity_units) or
                (write_capacity_units and not read_capacity_units)):
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get(pythonic(READ_CAPACITY_UNITS)),
WRITE_CAPACITY_UNITS: index.get(pythonic(WRITE_CAPACITY_UNITS))
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {0}".format(e), e)
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Performs the ListTables operation
"""
operation_kwargs = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {0}".format(e), e)
def describe_table(self, table_name):
"""
Performs the DescribeTable operation
"""
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_conditional_operator(self, operator):
"""
Returns a dictionary containing the correct conditional operator,
validating it first.
"""
operator = operator.upper()
if operator not in CONDITIONAL_OPERATORS:
raise ValueError(
"The {0} must be one of {1}".format(
CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS
)
)
return {
CONDITIONAL_OPERATOR: operator
}
def get_item_attribute_map(self, table_name, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def get_expected_map(self, table_name, expected):
"""
Builds the expected map that is common to several operations
"""
kwargs = {EXPECTED: {}}
for key, condition in expected.items():
if EXISTS in condition:
kwargs[EXPECTED][key] = {
EXISTS: condition.get(EXISTS)
}
elif VALUE in condition:
kwargs[EXPECTED][key] = {
VALUE: {
self.get_attribute_type(table_name, key): condition.get(VALUE)
}
}
elif COMPARISON_OPERATOR in condition:
kwargs[EXPECTED][key] = {
COMPARISON_OPERATOR: condition.get(COMPARISON_OPERATOR),
}
values = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_type = self.get_attribute_type(table_name, key, value)
values.append({attr_type: self.parse_attribute(value)})
if condition.get(COMPARISON_OPERATOR) not in [NULL, NOT_NULL]:
kwargs[EXPECTED][key][ATTR_VALUE_LIST] = values
return kwargs
def parse_attribute(self, attribute, return_type=False):
"""
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
"""
if isinstance(attribute, dict):
for key in SHORT_ATTR_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {0}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
def get_attribute_type(self, table_name, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
        :param value: The attribute value can be supplied in case the type is already included
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(self, table_name, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_query_filter_map(self, table_name, query_filters):
"""
Builds the QueryFilter object needed for the Query operation
"""
kwargs = {
QUERY_FILTER: {}
}
for key, condition in query_filters.items():
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
attr_value_list = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_value_list.append({
self.get_attribute_type(table_name, key, value): self.parse_attribute(value)
})
kwargs[QUERY_FILTER][key] = {
COMPARISON_OPERATOR: operator
}
if len(attr_value_list):
kwargs[QUERY_FILTER][key][ATTR_VALUE_LIST] = attr_value_list
return kwargs
def get_consumed_capacity_map(self, return_consumed_capacity):
"""
Builds the consumed capacity map that is common to several operations
"""
if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY_VALUES))
return {
RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
}
def get_return_values_map(self, return_values):
"""
Builds the return values map that is common to several operations
"""
if return_values.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
return {
RETURN_VALUES: str(return_values).upper()
}
def get_item_collection_map(self, return_item_collection_metrics):
"""
Builds the item collection map
"""
if return_item_collection_metrics.upper() not in RETURN_ITEM_COLL_METRICS_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
return {
RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()
}
def get_exclusive_start_key_map(self, table_name, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_exclusive_start_key_map(exclusive_start_key)
def delete_item(self,
table_name,
hash_key,
range_key=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the DeleteItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(DELETE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise DeleteError("Failed to delete item: {0}".format(e), e)
def update_item(self,
table_name,
hash_key,
range_key=None,
actions=None,
attribute_updates=None,
condition=None,
expected=None,
return_consumed_capacity=None,
conditional_operator=None,
return_item_collection_metrics=None,
return_values=None):
"""
Performs the UpdateItem operation
"""
self._check_actions(actions, attribute_updates)
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if not actions and not attribute_updates:
raise ValueError("{0} cannot be empty".format(ATTR_UPDATES))
actions = actions or []
attribute_updates = attribute_updates or {}
update_expression = Update(*actions)
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(attribute_updates.keys()):
path = Path([key])
update = attribute_updates[key]
action = update.get(ACTION)
if action not in ATTR_UPDATE_ACTIONS:
raise ValueError("{0} must be one of {1}".format(ACTION, ATTR_UPDATE_ACTIONS))
value = update.get(VALUE)
attr_type, value = self.parse_attribute(value, return_type=True)
if attr_type is None and action != DELETE:
attr_type = self.get_attribute_type(table_name, key, value)
value = {attr_type: value}
if action == DELETE:
action = path.remove() if attr_type is None else path.delete(value)
elif action == PUT:
action = path.set(value)
else:
action = path.add(value)
update_expression.add_action(action)
operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(name_placeholders, expression_attribute_values)
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(UPDATE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise UpdateError("Failed to update item: {0}".format(e), e)
def put_item(self,
table_name,
hash_key,
range_key=None,
attributes=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the PutItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
name_placeholders = {}
expression_attribute_values = {}
if attributes:
attrs = self.get_item_attribute_map(table_name, attributes)
operation_kwargs[ITEM].update(attrs[ITEM])
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(PUT_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to put item: {0}".format(e), e)
def batch_write_item(self,
table_name,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the batch_write_item operation
"""
if put_items is None and delete_items is None:
raise ValueError("Either put_items or delete_items must be specified")
operation_kwargs = {
REQUEST_ITEMS: {
table_name: []
}
}
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
put_items_list = []
if put_items:
for item in put_items:
put_items_list.append({
PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)
})
delete_items_list = []
if delete_items:
for item in delete_items:
delete_items_list.append({
DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)
})
operation_kwargs[REQUEST_ITEMS][table_name] = delete_items_list + put_items_list
try:
return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to batch write items: {0}".format(e), e)
def batch_get_item(self,
table_name,
keys,
consistent_read=None,
return_consumed_capacity=None,
attributes_to_get=None):
"""
Performs the batch get item operation
"""
operation_kwargs = {
REQUEST_ITEMS: {
table_name: {}
}
}
args_map = {}
name_placeholders = {}
if consistent_read:
args_map[CONSISTENT_READ] = consistent_read
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
args_map[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
keys_map = {KEYS: []}
for key in keys:
keys_map[KEYS].append(
self.get_item_attribute_map(table_name, key)[ITEM]
)
operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
try:
return self.dispatch(BATCH_GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to batch get items: {0}".format(e), e)
def get_item(self,
table_name,
hash_key,
range_key=None,
consistent_read=False,
attributes_to_get=None):
"""
Performs the GetItem operation and returns the result
"""
operation_kwargs = {}
name_placeholders = {}
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[CONSISTENT_READ] = consistent_read
operation_kwargs[TABLE_NAME] = table_name
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
try:
return self.dispatch(GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to get item: {0}".format(e), e)
def rate_limited_scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
page_size=None,
limit=None,
conditional_operator=None,
scan_filter=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
timeout_seconds=None,
read_capacity_to_consume_per_second=10,
allow_rate_limited_scan_without_consumed_capacity=None,
max_sleep_between_retry=10,
max_consecutive_exceptions=10,
consistent_read=None,
index_name=None):
"""
        Performs a rate limited scan on the table. It fetches items with the Scan API and uses the
        'ConsumedCapacity' value returned by DynamoDB to limit the rate of the scan.
        'ProvisionedThroughputExceededException' is also handled and retried.
:param table_name: Name of the table to perform scan on.
:param filter_condition: Condition used to restrict the scan results
:param attributes_to_get: A list of attributes to return.
:param page_size: Page size of the scan to DynamoDB
:param limit: Used to limit the number of results returned
:param conditional_operator:
:param scan_filter: A map indicating the condition that evaluates the scan results
:param exclusive_start_key: If set, provides the starting point for scan.
:param segment: If set, then scans the segment
:param total_segments: If set, then specifies total segments
:param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running
infinitely
:param read_capacity_to_consume_per_second: Amount of read capacity to consume
every second
:param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if
the server does not support returning consumed capacity in responses.
:param max_sleep_between_retry: Max value for sleep in seconds in between scans during
throttling/rate limit scenarios
:param max_consecutive_exceptions: Max number of consecutive ProvisionedThroughputExceededException
exception for scan to exit
:param consistent_read: enable consistent read
:param index_name: an index to perform the scan on
"""
read_capacity_to_consume_per_ms = float(read_capacity_to_consume_per_second) / 1000
if allow_rate_limited_scan_without_consumed_capacity is None:
allow_rate_limited_scan_without_consumed_capacity = get_settings_value(
'allow_rate_limited_scan_without_consumed_capacity'
)
total_consumed_read_capacity = 0.0
last_evaluated_key = exclusive_start_key
rate_available = True
latest_scan_consumed_capacity = 0
consecutive_provision_throughput_exceeded_ex = 0
start_time = time.time()
if page_size is None:
if limit and read_capacity_to_consume_per_second > limit:
page_size = limit
else:
page_size = read_capacity_to_consume_per_second
while True:
if rate_available:
try:
data = self.scan(
table_name,
filter_condition=filter_condition,
attributes_to_get=attributes_to_get,
exclusive_start_key=last_evaluated_key,
limit=page_size,
conditional_operator=conditional_operator,
return_consumed_capacity=TOTAL,
scan_filter=scan_filter,
segment=segment,
total_segments=total_segments,
consistent_read=consistent_read,
index_name=index_name
)
for item in data.get(ITEMS):
yield item
if limit is not None:
limit -= 1
if not limit:
return
if CONSUMED_CAPACITY in data:
latest_scan_consumed_capacity = data.get(CONSUMED_CAPACITY).get(CAPACITY_UNITS)
else:
if allow_rate_limited_scan_without_consumed_capacity:
latest_scan_consumed_capacity = 0
else:
                            raise ScanError('Rate limited scan not possible because the server did not send back '
                                            'consumed capacity information. If you wish scans to complete anyway '
                                            'without functioning rate limiting, set '
                                            'allow_rate_limited_scan_without_consumed_capacity to True in settings.')
last_evaluated_key = data.get(LAST_EVALUATED_KEY, None)
consecutive_provision_throughput_exceeded_ex = 0
except ScanError as e:
# Only retry if provision throughput is exceeded.
if isinstance(e.cause, ClientError):
code = e.cause.response['Error'].get('Code')
if code == "ProvisionedThroughputExceededException":
consecutive_provision_throughput_exceeded_ex += 1
if consecutive_provision_throughput_exceeded_ex > max_consecutive_exceptions:
# Max threshold reached
raise
else:
# Different exception, other than ProvisionedThroughputExceededException
raise
else:
# Not a Client error
raise
# No throttling, and no more scans needed. Just return
if not last_evaluated_key and consecutive_provision_throughput_exceeded_ex == 0:
return
current_time = time.time()
# elapsed_time_ms indicates the time taken in ms from the start of the
            # rate_limited_scan call.
elapsed_time_ms = max(1, round((current_time - start_time) * 1000))
if consecutive_provision_throughput_exceeded_ex == 0:
total_consumed_read_capacity += latest_scan_consumed_capacity
consumed_rate = total_consumed_read_capacity / elapsed_time_ms
rate_available = (read_capacity_to_consume_per_ms - consumed_rate) >= 0
# consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred.
# ProvisionedThroughputExceededException can occur if:
# - The rate to consume is passed incorrectly.
# - External factors, even if the current scan is within limits.
if not rate_available or (consecutive_provision_throughput_exceeded_ex > 0):
# Minimum value is 1 second.
elapsed_time_s = math.ceil(elapsed_time_ms / 1000)
# Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume--
                time_to_sleep = max(1, round((total_consumed_read_capacity / elapsed_time_s) \
/ read_capacity_to_consume_per_second))
# At any moment if the timeout_seconds hits, then return
if timeout_seconds and (elapsed_time_s + time_to_sleep) > timeout_seconds:
raise ScanError("Input timeout value {0} has expired".format(timeout_seconds))
time.sleep(min(math.ceil(time_to_sleep), max_sleep_between_retry))
# Reset the latest_scan_consumed_capacity, as no scan operation was performed.
latest_scan_consumed_capacity = 0
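    # A minimal usage sketch (comment only; table and attribute names are
    # hypothetical and not part of this module). The generator throttles itself
    # based on ConsumedCapacity, so callers simply iterate over it:
    #
    #   connection = Connection(region='us-east-1')
    #   for item in connection.rate_limited_scan(
    #           'Thread', read_capacity_to_consume_per_second=5, limit=100):
    #       process(item)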
def scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
limit=None,
conditional_operator=None,
scan_filter=None,
return_consumed_capacity=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
consistent_read=None,
index_name=None):
"""
Performs the scan operation
"""
self._check_condition('filter_condition', filter_condition, scan_filter, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if segment is not None:
operation_kwargs[SEGMENT] = segment
if total_segments:
operation_kwargs[TOTAL_SEGMENTS] = total_segments
if scan_filter:
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
filter_expression = self._get_filter_expression(
table_name, scan_filter, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = consistent_read
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(SCAN, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise ScanError("Failed to scan table: {0}".format(e), e)
def query(self,
table_name,
hash_key,
range_key_condition=None,
filter_condition=None,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
query_filters=None,
conditional_operator=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None):
"""
Performs the Query operation and returns the result
"""
self._check_condition('range_key_condition', range_key_condition, key_conditions, conditional_operator)
self._check_condition('filter_condition', filter_condition, query_filters, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table: {0}".format(table_name))
if index_name:
hash_keyname = tbl.get_index_hash_keyname(index_name)
if not hash_keyname:
raise ValueError("No hash key attribute for index: {0}".format(index_name))
range_keyname = tbl.get_index_range_keyname(index_name)
else:
hash_keyname = tbl.hash_keyname
range_keyname = tbl.range_keyname
key_condition = self._get_condition(table_name, hash_keyname, '__eq__', hash_key)
if range_key_condition is not None:
if range_key_condition.is_valid_range_key_condition(range_keyname):
key_condition = key_condition & range_key_condition
elif filter_condition is None:
# Try to gracefully handle the case where a user passed in a filter as a range key condition
(filter_condition, range_key_condition) = (range_key_condition, None)
else:
raise ValueError("{0} is not a valid range key condition".format(range_key_condition))
if key_conditions is None or len(key_conditions) == 0:
pass # No comparisons on sort key
elif len(key_conditions) > 1:
raise ValueError("Multiple attributes are not supported in key_conditions: {0}".format(key_conditions))
else:
(key, condition), = key_conditions.items()
operator = condition.get(COMPARISON_OPERATOR)
if operator not in COMPARISON_OPERATOR_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, COMPARISON_OPERATOR_VALUES))
operator = KEY_CONDITION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST)
sort_key_expression = self._get_condition(table_name, key, operator, *values)
key_condition = key_condition & sort_key_expression
operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
name_placeholders, expression_attribute_values)
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
# FilterExpression does not allow key attributes. Check for hash and range key name placeholders
hash_key_placeholder = name_placeholders.get(hash_keyname)
range_key_placeholder = range_keyname and name_placeholders.get(range_keyname)
if (
hash_key_placeholder in filter_expression or
(range_key_placeholder and range_key_placeholder in filter_expression)
):
raise ValueError("'filter_condition' cannot contain key attributes")
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = True
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
# We read the conditional operator even without a query filter passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if query_filters:
filter_expression = self._get_filter_expression(
table_name, query_filters, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if select:
if select.upper() not in SELECT_VALUES:
raise ValueError("{0} must be one of {1}".format(SELECT, SELECT_VALUES))
operation_kwargs[SELECT] = str(select).upper()
if scan_index_forward is not None:
operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(QUERY, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise QueryError("Failed to query items: {0}".format(e), e)
def _get_condition_expression(self, table_name, expected, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the ConditionExpression needed for DeleteItem, PutItem, and UpdateItem operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(expected.keys()):
condition = expected[key]
if EXISTS in condition:
operator = NOT_NULL if condition.get(EXISTS, True) else NULL
values = []
elif VALUE in condition:
operator = EQ
values = [condition.get(VALUE)]
else:
operator = condition.get(COMPARISON_OPERATOR)
values = condition.get(ATTR_VALUE_LIST, [])
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
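    # Illustration of the translation above (attribute names are made up): a
    # legacy expected map such as
    #   {'views': {'ComparisonOperator': 'GT', 'AttributeValueList': [5]}}
    # combined with the AND operator serializes to a condition expression of the
    # form '#0 > :0', with '#0' standing in for 'views' in name_placeholders and
    # ':0' for the typed value in expression_attribute_values.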
def _get_filter_expression(self, table_name, filters, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the FilterExpression needed for Query and Scan operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(filters.keys()):
condition = filters[key]
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST, [])
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_condition(self, table_name, attribute_name, operator, *values):
values = [
{self.get_attribute_type(table_name, attribute_name, value): self.parse_attribute(value)}
for value in values
]
return getattr(Path([attribute_name]), operator)(*values)
def _check_actions(self, actions, attribute_updates):
if actions is not None:
if attribute_updates is not None:
raise ValueError("Legacy attribute updates cannot be used with update actions")
else:
if attribute_updates is not None:
warnings.warn("Legacy attribute updates are deprecated in favor of update actions")
def _check_condition(self, name, condition, expected_or_filter, conditional_operator):
if condition is not None:
if not isinstance(condition, Condition):
raise ValueError("'{0}' must be an instance of Condition".format(name))
if expected_or_filter or conditional_operator is not None:
raise ValueError("Legacy conditional parameters cannot be used with condition expressions")
else:
if expected_or_filter or conditional_operator is not None:
warnings.warn("Legacy conditional parameters are deprecated in favor of condition expressions")
@staticmethod
def _reverse_dict(d):
return dict((v, k) for k, v in six.iteritems(d))
def _convert_binary(attr):
if BINARY_SHORT in attr:
attr[BINARY_SHORT] = b64decode(attr[BINARY_SHORT].encode(DEFAULT_ENCODING))
elif BINARY_SET_SHORT in attr:
value = attr[BINARY_SET_SHORT]
if value and len(value):
attr[BINARY_SET_SHORT] = set(b64decode(v.encode(DEFAULT_ENCODING)) for v in value)
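# A minimal usage sketch, added for illustration only; the endpoint, region and
# table name below are assumptions and not part of the original module.
def _example_connection_usage():
    connection = Connection(region='us-east-1', host='http://localhost:8000')
    # describe_table() raises TableDoesNotExist if the table is missing.
    description = connection.describe_table('Thread')
    # get_item() dispatches GetItem and returns the raw response dict.
    item = connection.get_item('Thread', hash_key='some-forum-thread')
    return description, item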
|
__init__
|
Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import settings
import time
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast
ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'
ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']
class ChartService(object):
"""Class for querying chart data."""
# MASKED: __init__ function (lines 44-59)
def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
project, perms, group_by=None, label_prefix=None,
query=None, canned_query=None):
"""Queries historical issue counts grouped by label or component.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
unixtime: An integer representing the Unix time in seconds.
effective_ids: The effective User IDs associated with the current user.
project: A project object representing the current project.
perms: A permissions object associated with the current user.
group_by (str, optional): Which dimension to group by. Values can
be 'label', 'component', or None, in which case no grouping will
be applied.
      label_prefix: Required when group_by is 'label'. Will limit the query to
only labels with the specified prefix (for example 'Pri').
query (str, optional): A query string from the request to apply to
the snapshot query.
canned_query (str, optional): Parsed canned query applied to the query
scope.
Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
2. A list of any unsupported query conditions in query.
3. A boolean that is true if any results were capped.
"""
project_config = services.config.GetProjectConfig(cnxn,
project.project_id)
try:
query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
cnxn, services, project_config, query, canned_query, project)
except ast2select.NoPossibleResults:
return {}, ['Invalid query.'], False
restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
cnxn, None, self.config_service, effective_ids, project, perms)
left_joins = [
('Issue ON IssueSnapshot.issue_id = Issue.id', []),
]
if restricted_label_ids:
left_joins.append(
(('Issue2Label AS Forbidden_label'
' ON Issue.id = Forbidden_label.issue_id'
' AND Forbidden_label.label_id IN (%s)' % (
sql.PlaceHolders(restricted_label_ids)
)), restricted_label_ids))
if effective_ids:
left_joins.append(
('Issue2Cc AS I2cc'
' ON Issue.id = I2cc.issue_id'
' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
effective_ids))
# TODO(jeffcarp): Handle case where there are issues with no labels.
where = [
('IssueSnapshot.period_start <= %s', [unixtime]),
('IssueSnapshot.period_end > %s', [unixtime]),
('IssueSnapshot.project_id = %s', [project.project_id]),
('Issue.is_spam = %s', [False]),
('Issue.deleted = %s', [False]),
]
forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
if effective_ids:
if restricted_label_ids:
forbidden_label_clause = ' OR %s' % forbidden_label_clause
else:
forbidden_label_clause = ''
where.append(
((
'(Issue.reporter_id IN (%s)'
' OR Issue.owner_id IN (%s)'
' OR I2cc.cc_id IS NOT NULL'
'%s)'
) % (
sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
forbidden_label_clause
),
list(effective_ids) + list(effective_ids)
))
else:
where.append((forbidden_label_clause, []))
if group_by == 'component':
cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Component AS Is2c ON'
' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
])
group_by = ['Comp.path']
elif group_by == 'label':
cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Label AS Is2l'
' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
])
if not label_prefix:
raise ValueError('`label_prefix` required when grouping by label.')
# TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
# ensure regex is case-insensitive.
where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
group_by = ['Lab.label']
elif group_by == 'open':
cols = ['IssueSnapshot.is_open',
'COUNT(IssueSnapshot.issue_id) AS issue_count']
group_by = ['IssueSnapshot.is_open']
elif group_by == 'status':
left_joins.append(('StatusDef AS Stats ON ' \
'Stats.id = IssueSnapshot.status_id', []))
cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['Stats.status']
elif group_by == 'owner':
cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['IssueSnapshot.owner_id']
elif not group_by:
cols = ['IssueSnapshot.issue_id']
else:
raise ValueError('`group_by` must be label, component, ' \
'open, status, owner or None.')
if query_left_joins:
left_joins.extend(query_left_joins)
if query_where:
where.extend(query_where)
promises = []
for shard_id in range(settings.num_logical_shards):
count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
where=where, joins=left_joins, group_by=group_by,
shard_id=shard_id)
promises.append(framework_helpers.Promise(cnxn.Execute,
count_stmt, stmt_args, shard_id=shard_id))
shard_values_dict = {}
search_limit_reached = False
for promise in promises:
# Wait for each query to complete and add it to the dict.
shard_values = list(promise.WaitAndGetValue())
if not shard_values:
continue
if group_by:
for name, count in shard_values:
if count >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault(name, 0)
shard_values_dict[name] += count
else:
if shard_values[0][0] >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault('total', 0)
shard_values_dict['total'] += shard_values[0][0]
unsupported_field_names = list(set([
field.field_name
for cond in unsupported_conds
for field in cond.field_defs
]))
return shard_values_dict, unsupported_field_names, search_limit_reached
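  # A usage sketch (comment only; identifiers are illustrative):
  #
  #   counts, unsupported, limited = chart_service.QueryIssueSnapshots(
  #       cnxn, services, unixtime=1514764800, effective_ids={111},
  #       project=project, perms=perms, group_by='label', label_prefix='Pri')
  #
  # `counts` then maps label values such as 'Pri-1' to issue counts, while
  # `limited` indicates whether any shard hit chart_query_max_rows.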
def StoreIssueSnapshots(self, cnxn, issues, commit=True):
"""Adds an IssueSnapshot and updates the previous one for each issue."""
for issue in issues:
right_now = self._currentTime()
# Update previous snapshot of current issue's end time to right now.
self.issuesnapshot_tbl.Update(cnxn,
delta={'period_end': right_now},
where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
('IssueSnapshot.period_end = %s',
[settings.maximum_snapshot_period_end])],
commit=commit)
config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
period_end = settings.maximum_snapshot_period_end
is_open = tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
shard = issue.issue_id % settings.num_logical_shards
status = tracker_bizobj.GetStatus(issue)
status_id = self.config_service.LookupStatusID(
cnxn, issue.project_id, status) or None
owner_id = tracker_bizobj.GetOwnerId(issue) or None
issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
period_end, is_open)]
ids = self.issuesnapshot_tbl.InsertRows(
cnxn, ISSUESNAPSHOT_COLS[1:],
issuesnapshot_rows,
replace=True, commit=commit,
return_generated_ids=True)
issuesnapshot_id = ids[0]
# Add all labels to IssueSnapshot2Label.
label_rows = [
(issuesnapshot_id,
self.config_service.LookupLabelID(cnxn, issue.project_id, label))
for label in tracker_bizobj.GetLabels(issue)
]
self.issuesnapshot2label_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2LABEL_COLS,
label_rows, replace=True, commit=commit)
# Add all CCs to IssueSnapshot2Cc.
cc_rows = [
(issuesnapshot_id, cc_id)
for cc_id in tracker_bizobj.GetCcIds(issue)
]
self.issuesnapshot2cc_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2CC_COLS,
cc_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Component.
component_rows = [
(issuesnapshot_id, component_id)
for component_id in issue.component_ids
]
self.issuesnapshot2component_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
component_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Hotlist.
# This is raw SQL to obviate passing FeaturesService down through
# the call stack wherever this function is called.
# TODO(jrobbins): sort out dependencies between service classes.
cnxn.Execute('''
INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
''', [issuesnapshot_id, issue.issue_id])
def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids):
"""Expunge the existence of hotlists from issue snapshots.
This method will not commit the operation. This method will not make
changes to in-memory data.
Args:
cnxn: connection to SQL database.
hotlist_ids: list of hotlist_ids for hotlists we want to delete.
"""
vals_ph = sql.PlaceHolders(hotlist_ids)
cnxn.Execute(
'DELETE FROM IssueSnapshot2Hotlist '
'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
hotlist_ids,
commit=False)
def _currentTime(self):
"""This is a separate method so it can be mocked by tests."""
return time.time()
def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
project):
"""Parses a query string into LEFT JOIN and WHERE conditions.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
project_config: The configuration for the given project.
query (string): The query to parse.
canned_query (string): The supplied canned query.
project: The current project.
Returns:
1. A list of LEFT JOIN clauses for the SQL query.
      2. A list of WHERE clauses for the SQL query.
3. A list of query conditions that are unsupported with snapshots.
"""
if not (query or canned_query):
return [], [], []
query = query or ''
scope = canned_query or ''
query_ast = query2ast.ParseUserQuery(query, scope,
query2ast.BUILTIN_ISSUE_FIELDS, project_config)
query_ast = ast2ast.PreprocessAST(cnxn, query_ast, [project.project_id],
services, project_config)
left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
snapshot_mode=True)
return left_joins, where, unsupported
def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
"""Given SQL arguments, executes a snapshot COUNT query."""
stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
stmt.AddJoinClauses(joins, left=True)
stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
if group_by:
stmt.AddGroupByTerms(group_by)
stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
stmt_str, stmt_args = stmt.Generate()
if group_by:
if group_by[0] == 'IssueSnapshot.is_open':
count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") ' \
'AS bool_open, results.issue_count ' \
'FROM (%s) AS results' % stmt_str)
else:
count_stmt = stmt_str
else:
count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
stmt_str)
return count_stmt, stmt_args
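  # Illustration of the statement shape produced above (simplified): with no
  # group_by, the per-shard SQL is wrapped as
  #   SELECT COUNT(results.issue_id) FROM (SELECT DISTINCT ...) AS results
  # so each shard returns a single row that QueryIssueSnapshots sums into the
  # 'total' bucket.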
|
def __init__(self, config_service):
"""Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
"""
self.config_service = config_service
# Set up SQL table objects.
self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
self.issuesnapshot2cc_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2CC_TABLE_NAME)
self.issuesnapshot2component_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
self.issuesnapshot2label_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2LABEL_TABLE_NAME)
| 44 | 59 |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import settings
import time
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast
ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'
ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']
class ChartService(object):
"""Class for querying chart data."""
def __init__(self, config_service):
"""Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
"""
self.config_service = config_service
# Set up SQL table objects.
self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
self.issuesnapshot2cc_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2CC_TABLE_NAME)
self.issuesnapshot2component_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
self.issuesnapshot2label_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2LABEL_TABLE_NAME)
def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
project, perms, group_by=None, label_prefix=None,
query=None, canned_query=None):
"""Queries historical issue counts grouped by label or component.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
unixtime: An integer representing the Unix time in seconds.
effective_ids: The effective User IDs associated with the current user.
project: A project object representing the current project.
perms: A permissions object associated with the current user.
group_by (str, optional): Which dimension to group by. Values can
be 'label', 'component', or None, in which case no grouping will
be applied.
      label_prefix: Required when group_by is 'label'. Will limit the query to
only labels with the specified prefix (for example 'Pri').
query (str, optional): A query string from the request to apply to
the snapshot query.
canned_query (str, optional): Parsed canned query applied to the query
scope.
Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
2. A list of any unsupported query conditions in query.
3. A boolean that is true if any results were capped.
"""
project_config = services.config.GetProjectConfig(cnxn,
project.project_id)
try:
query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
cnxn, services, project_config, query, canned_query, project)
except ast2select.NoPossibleResults:
return {}, ['Invalid query.'], False
restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
cnxn, None, self.config_service, effective_ids, project, perms)
left_joins = [
('Issue ON IssueSnapshot.issue_id = Issue.id', []),
]
if restricted_label_ids:
left_joins.append(
(('Issue2Label AS Forbidden_label'
' ON Issue.id = Forbidden_label.issue_id'
' AND Forbidden_label.label_id IN (%s)' % (
sql.PlaceHolders(restricted_label_ids)
)), restricted_label_ids))
if effective_ids:
left_joins.append(
('Issue2Cc AS I2cc'
' ON Issue.id = I2cc.issue_id'
' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
effective_ids))
# TODO(jeffcarp): Handle case where there are issues with no labels.
where = [
('IssueSnapshot.period_start <= %s', [unixtime]),
('IssueSnapshot.period_end > %s', [unixtime]),
('IssueSnapshot.project_id = %s', [project.project_id]),
('Issue.is_spam = %s', [False]),
('Issue.deleted = %s', [False]),
]
forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
if effective_ids:
if restricted_label_ids:
forbidden_label_clause = ' OR %s' % forbidden_label_clause
else:
forbidden_label_clause = ''
where.append(
((
'(Issue.reporter_id IN (%s)'
' OR Issue.owner_id IN (%s)'
' OR I2cc.cc_id IS NOT NULL'
'%s)'
) % (
sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
forbidden_label_clause
),
list(effective_ids) + list(effective_ids)
))
else:
where.append((forbidden_label_clause, []))
if group_by == 'component':
cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Component AS Is2c ON'
' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
])
group_by = ['Comp.path']
elif group_by == 'label':
cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Label AS Is2l'
' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
])
if not label_prefix:
raise ValueError('`label_prefix` required when grouping by label.')
# TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
# ensure regex is case-insensitive.
where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
group_by = ['Lab.label']
elif group_by == 'open':
cols = ['IssueSnapshot.is_open',
'COUNT(IssueSnapshot.issue_id) AS issue_count']
group_by = ['IssueSnapshot.is_open']
elif group_by == 'status':
left_joins.append(('StatusDef AS Stats ON ' \
'Stats.id = IssueSnapshot.status_id', []))
cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['Stats.status']
elif group_by == 'owner':
cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['IssueSnapshot.owner_id']
elif not group_by:
cols = ['IssueSnapshot.issue_id']
else:
raise ValueError('`group_by` must be label, component, ' \
'open, status, owner or None.')
if query_left_joins:
left_joins.extend(query_left_joins)
if query_where:
where.extend(query_where)
promises = []
for shard_id in range(settings.num_logical_shards):
count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
where=where, joins=left_joins, group_by=group_by,
shard_id=shard_id)
promises.append(framework_helpers.Promise(cnxn.Execute,
count_stmt, stmt_args, shard_id=shard_id))
shard_values_dict = {}
search_limit_reached = False
for promise in promises:
# Wait for each query to complete and add it to the dict.
shard_values = list(promise.WaitAndGetValue())
if not shard_values:
continue
if group_by:
for name, count in shard_values:
if count >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault(name, 0)
shard_values_dict[name] += count
else:
if shard_values[0][0] >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault('total', 0)
shard_values_dict['total'] += shard_values[0][0]
unsupported_field_names = list(set([
field.field_name
for cond in unsupported_conds
for field in cond.field_defs
]))
return shard_values_dict, unsupported_field_names, search_limit_reached
def StoreIssueSnapshots(self, cnxn, issues, commit=True):
"""Adds an IssueSnapshot and updates the previous one for each issue."""
for issue in issues:
right_now = self._currentTime()
# Update previous snapshot of current issue's end time to right now.
self.issuesnapshot_tbl.Update(cnxn,
delta={'period_end': right_now},
where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
('IssueSnapshot.period_end = %s',
[settings.maximum_snapshot_period_end])],
commit=commit)
config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
period_end = settings.maximum_snapshot_period_end
is_open = tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
shard = issue.issue_id % settings.num_logical_shards
status = tracker_bizobj.GetStatus(issue)
status_id = self.config_service.LookupStatusID(
cnxn, issue.project_id, status) or None
owner_id = tracker_bizobj.GetOwnerId(issue) or None
issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
period_end, is_open)]
ids = self.issuesnapshot_tbl.InsertRows(
cnxn, ISSUESNAPSHOT_COLS[1:],
issuesnapshot_rows,
replace=True, commit=commit,
return_generated_ids=True)
issuesnapshot_id = ids[0]
# Add all labels to IssueSnapshot2Label.
label_rows = [
(issuesnapshot_id,
self.config_service.LookupLabelID(cnxn, issue.project_id, label))
for label in tracker_bizobj.GetLabels(issue)
]
self.issuesnapshot2label_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2LABEL_COLS,
label_rows, replace=True, commit=commit)
# Add all CCs to IssueSnapshot2Cc.
cc_rows = [
(issuesnapshot_id, cc_id)
for cc_id in tracker_bizobj.GetCcIds(issue)
]
self.issuesnapshot2cc_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2CC_COLS,
cc_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Component.
component_rows = [
(issuesnapshot_id, component_id)
for component_id in issue.component_ids
]
self.issuesnapshot2component_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
component_rows,
replace=True, commit=commit)
# Add all hotlists to IssueSnapshot2Hotlist.
# This is raw SQL to obviate passing FeaturesService down through
# the call stack wherever this function is called.
# TODO(jrobbins): sort out dependencies between service classes.
cnxn.Execute('''
INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
''', [issuesnapshot_id, issue.issue_id])
def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids):
"""Expunge the existence of hotlists from issue snapshots.
This method will not commit the operation. This method will not make
changes to in-memory data.
Args:
cnxn: connection to SQL database.
hotlist_ids: list of hotlist_ids for hotlists we want to delete.
"""
vals_ph = sql.PlaceHolders(hotlist_ids)
cnxn.Execute(
'DELETE FROM IssueSnapshot2Hotlist '
'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
hotlist_ids,
commit=False)
def _currentTime(self):
"""This is a separate method so it can be mocked by tests."""
return time.time()
def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
project):
"""Parses a query string into LEFT JOIN and WHERE conditions.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
project_config: The configuration for the given project.
query (string): The query to parse.
canned_query (string): The supplied canned query.
project: The current project.
Returns:
1. A list of LEFT JOIN clauses for the SQL query.
2. A list of WHERE clauses for the SQL query.
3. A list of query conditions that are unsupported with snapshots.
"""
if not (query or canned_query):
return [], [], []
query = query or ''
scope = canned_query or ''
query_ast = query2ast.ParseUserQuery(query, scope,
query2ast.BUILTIN_ISSUE_FIELDS, project_config)
query_ast = ast2ast.PreprocessAST(cnxn, query_ast, [project.project_id],
services, project_config)
left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
snapshot_mode=True)
return left_joins, where, unsupported
def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
"""Given SQL arguments, executes a snapshot COUNT query."""
stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
stmt.AddJoinClauses(joins, left=True)
stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
if group_by:
stmt.AddGroupByTerms(group_by)
stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
stmt_str, stmt_args = stmt.Generate()
if group_by:
if group_by[0] == 'IssueSnapshot.is_open':
count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") ' \
'AS bool_open, results.issue_count ' \
'FROM (%s) AS results' % stmt_str)
else:
count_stmt = stmt_str
else:
count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
stmt_str)
return count_stmt, stmt_args
|
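To make the shard fan-out in ChartService.QueryIssueSnapshots above easier to follow, here is a minimal, self-contained sketch of the same aggregation pattern: run one count query per logical shard, then merge the per-shard (name, count) rows into a single dict and flag when any shard hits the row cap. Everything below (shard_count, NUM_SHARDS, MAX_ROWS, and the use of ThreadPoolExecutor in place of framework_helpers.Promise) is an illustrative stand-in, not Monorail code.

# Illustrative sketch only: mirrors the per-shard merge in QueryIssueSnapshots
# using the standard library instead of framework_helpers.Promise.
from concurrent.futures import ThreadPoolExecutor

NUM_SHARDS = 10   # stand-in for settings.num_logical_shards
MAX_ROWS = 30     # stand-in for settings.chart_query_max_rows

def shard_count(shard_id):
    """Placeholder for one per-shard query; returns [(name, count), ...]."""
    return [('Pri-1', 5), ('Pri-2', 3)] if shard_id % 2 else [('Pri-1', 2)]

def merge_shard_counts(shard_ids):
    totals, capped = {}, False
    with ThreadPoolExecutor() as pool:
        for rows in pool.map(shard_count, shard_ids):
            for name, count in rows:
                if count >= MAX_ROWS:
                    capped = True  # mirrors search_limit_reached
                totals[name] = totals.get(name, 0) + count
    return totals, capped

print(merge_shard_counts(range(NUM_SHARDS)))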
_minimal_polynomial_sq
|
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
|
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
# MASKED: _minimal_polynomial_sq function (lines 137-175)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomials of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
' polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
| 137 | 175 |
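The core trick in _separate_sq and _minimal_polynomial_sq above is that multiplying a surd expression by its sign-flipped conjugates (taking a field norm) eliminates the square roots. A small sanity check, written against SymPy rather than Diofant (the two APIs are similar but not identical, so treat this as a sketch): the product of x ± sqrt(2) ± sqrt(3) over all four sign choices is the rational polynomial x**4 - 10*x**2 + 1, which is exactly the minimal polynomial of sqrt(2) + sqrt(3).

# Sketch, assuming SymPy is installed; Diofant exposes similar functions.
from sympy import sqrt, symbols, expand, minimal_polynomial

x = symbols('x')
# All four sign-flipped conjugates of x - sqrt(2) - sqrt(3).
conjugates = [x + s2*sqrt(2) + s3*sqrt(3) for s2 in (1, -1) for s3 in (1, -1)]
norm = expand(conjugates[0]*conjugates[1]*conjugates[2]*conjugates[3])
print(norm)                                      # x**4 - 10*x**2 + 1
print(minimal_polynomial(sqrt(2) + sqrt(3), x))  # x**4 - 10*x**2 + 1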
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomials of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
' polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
_minpoly_op_algebraic_element
|
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
|
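The resultant construction described in this docstring can be checked independently: if f is the minimal polynomial of alpha and g is the minimal polynomial of beta, then the minimal polynomial of alpha + beta divides Res_y(f(x - y), g(y)). The sketch below uses SymPy as a stand-in for the Diofant calls (resultant, minimal_polynomial); with f = x**2 - 2 and g = y**2 - 3 the resultant comes out as x**4 - 10*x**2 + 1, the minimal polynomial of sqrt(2) + sqrt(3), so no extra factor needs to be discarded in this case.

# Sketch, assuming SymPy; Diofant's resultant/minimal_polynomial behave analogously.
from sympy import symbols, sqrt, resultant, minimal_polynomial

x, y = symbols('x y')
f = x**2 - 2                            # minimal polynomial of sqrt(2)
g = y**2 - 3                            # minimal polynomial of sqrt(3)
r = resultant(f.subs(x, x - y), g, y)   # eliminate y via the resultant
print(r)                                          # x**4 - 10*x**2 + 1
print(minimal_polynomial(sqrt(2) + sqrt(3), x))   # x**4 - 10*x**2 + 1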
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
# MASKED: _minpoly_op_algebraic_element function (lines 178-242)
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
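# Example for the prime-denominator branch above: the construction has roots
# sin(pi*p/q) for p = 1, ..., q - 1, so for sin(pi/3) = sqrt(3)/2 the resulting
# minimal polynomial over QQ is 4*x**2 - 3.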
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
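# Example for the generic branch above: for cos(pi/5) the polynomial is
# T_5(x) + 1 = (x + 1)*(4*x**2 - 2*x - 1)**2, and _choose_factor picks
# 4*x**2 - 2*x - 1, the minimal polynomial of cos(pi/5) = (1 + sqrt(5))/4.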
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
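# The loop above produces alternating binomial coefficients from the tan(n*a)
# expansion, giving a polynomial that vanishes at tan(ex); e.g. for tan(pi/3) it
# yields 3*x - x**3 = -x*(x**2 - 3), and the chosen factor x**2 - 3 is the
# minimal polynomial of sqrt(3).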
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
            # x**(2*q) - 1 = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
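# Example for the prime-q branch above: exp(I*pi/5) is a primitive 10th root of
# unity, and the returned sum 1 - x + x**2 - x**3 + x**4 is the 10th cyclotomic
# polynomial, i.e. its minimal polynomial over QQ.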
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
        If ``compose``, the minimal polynomials of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using resultants and factorization. If ``groebner``,
        a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
        raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
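# Sketch of the elimination for the docstring example sqrt(2) + 1: bottom_up_scan
# introduces a dummy a with defining polynomial a**2 - 2, so F = [x - a - 1, a**2 - 2];
# the lexicographic Groebner basis (with a > x) eliminates a, and its last element,
# x**2 - 2*x - 1, is returned.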
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
            raise RuntimeError('ran out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
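# For instance, for extension = [sqrt(2), sqrt(3)] one expects the primitive
# element theta = sqrt(2) + sqrt(3) (coeffs [1, 1]) with g = x**4 - 10*x**2 + 1,
# and H expressing each generator in theta, e.g. sqrt(2) = (theta**3 - 9*theta)/2.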
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
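# PSLQ above searches for an integer relation among [1, B, ..., B**(m-1), A],
# i.e. it writes A as a polynomial in B with rational coefficients; e.g. when
# embedding QQ(sqrt(2)) into QQ(sqrt(2) + sqrt(3)) one such relation is
# sqrt(2) = (t**3 - 9*t)/2 with t = sqrt(2) + sqrt(3).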
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
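# Here a.minpoly is factored over the field b; a linear factor x - r shows that b
# contains a root r of it, and the numeric check selects the root that actually
# equals a.ext.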
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
    if (op is Mul and deg1 == 1) or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
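# Worked example of the Add case above: for ex1 = sqrt(2), ex2 = sqrt(3) with
# mp1 = x**2 - 2 and mp2 = y**2 - 3,
#   Res_y((x - y)**2 - 2, y**2 - 3) = x**4 - 10*x**2 + 1,
# which is already the minimal polynomial of sqrt(2) + sqrt(3).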
| 178 | 242 |
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
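# The search above substitutes x = v (and, for domains with symbols, small integer
# grid points for the symbols) into every candidate factor and keeps those that are
# numerically ~ 0, doubling the precision until a single factor remains; e.g. for
# v = sqrt(2) and candidate factors x**2 - 2 and x**2 + 2 only the first survives.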
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
    if (op is Mul and deg1 == 1) or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
            # x**(2*q) - 1 = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
        If ``compose``, the minimal polynomials of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using resultants and factorization. If ``groebner``,
        a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
        raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
            raise RuntimeError('ran out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
_minpoly_pow
|
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
|
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
    if (op is Mul and deg1 == 1) or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
# MASKED: _minpoly_pow function (lines 263-308)
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
            # x**(2*q) - 1 = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
        If ``compose``, the minimal polynomials of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using resultants and factorization. If ``groebner``,
        a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
        raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
            raise RuntimeError('ran out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
| 263 | 308 |
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
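    # p1 collects the terms whose surds share the factor sqrt(g); the product
    # (p1 + p2)*(p1 - p2) is (up to sign) the field norm with respect to
    # sqrt(g), so it keeps p = p1 + p2 as a factor while eliminating sqrt(g).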
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
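        # the recurrence below produces the alternating binomial coefficients of
        # (1 + I*x)**n; tan(c*pi/2) is a root of its real part (c.numerator odd)
        # or of its imaginary part (c.numerator even)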
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
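        # exp(I*pi*c) is a root of unity of order dividing 2*q, so its minimal
        # polynomial is one of the cyclotomic factors of x**(2*q) - 1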
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
        If ``compose``, the minimal polynomials of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using the resultant and factorization. If ``groebner``,
        a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
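    # look for a primitive element of the form t = e_0 + u*e_1 + u**2*e_2 + ...
    # over the extension elements e_i; a lex Groebner basis of the minimal
    # polynomials together with x - t rewrites every generator in terms of t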
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
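    # work at increasing precision and use PSLQ to search for an integer
    # relation among 1, B, ..., B**(m - 1) and A; such a relation expresses A
    # as a rational polynomial h(B), which is then verified symbolically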
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
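    # a linear factor x - r of a's minimal polynomial over b gives a candidate
    # image r of a's generator; the numeric check verifies that r corresponds
    # to a.ext itself rather than to one of its conjugates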
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
_minpoly_compose
|
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
|
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
# MASKED: _minpoly_compose function (lines 456-536)
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
        If ``compose``, the minimal polynomials of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using the resultant and factorization. If ``groebner``,
        a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
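    # every algebraic subexpression of ``ex`` gets a fresh symbol together with
    # a defining polynomial; eliminating those symbols with a lex Groebner
    # basis leaves the minimal polynomial in ``x`` as the last basis element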
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
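        # split off the factors that are rational numbers raised to rational
        # powers: their minimal polynomial is written down directly as ``mp2``
        # below, while the remaining algebraic part ``ex1`` is recursed on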
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
| 456 | 536 |
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
        If ``compose``, the minimal polynomials of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using the resultant and factorization. If ``groebner``,
        a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
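    # the polynomial returned by the method may be reducible; keep the
    # irreducible factor having ``ex`` as a root and take its primitive part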
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
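# Worked example of the search above (a sketch, not a doctest):
#
#     g, coeffs, H = primitive_element([sqrt(2), sqrt(3)])
#
# The first coefficient vector tried, coeffs == [1, 1], already works: the primitive
# element is t = sqrt(2) + sqrt(3), g is its minimal polynomial x**4 - 10*x**2 + 1,
# and H expresses sqrt(2) and sqrt(3) as polynomials in t.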
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
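# Usage sketch (illustrative): an embedding exists from the smaller field into the
# larger one, but not the other way around, because the degree check `m % n` fails:
#
#     K2 = QQ.algebraic_field(sqrt(2))
#     K4 = QQ.algebraic_field(sqrt(2) + sqrt(3))
#     field_isomorphism(K2, K4)   # coefficients of the image of sqrt(2) in K4
#     field_isomorphism(K4, K2)   # None: 2 is not divisible by 4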
|
minpoly_groebner
|
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
|
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
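# Numeric illustration of the filtering loop above, for v = sqrt(2) + sqrt(3)
# (mirrors the f.as_expr().evalf(prec1, points, strict=False) call used there):
#
#     abs((x**4 - 10*x**2 + 1).evalf(10, {x: v}))   # ~0, survives every round
#     abs((x**2 - 2).evalf(10, {x: v}))             # ~7.9, dropped once eps shrinks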
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
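# Micro-example matching the first doctest above: for
# p = -x + sqrt(2) + sqrt(3) + sqrt(7) the split is p1 = sqrt(2) and
# p2 = -x + sqrt(3) + sqrt(7), and the returned p1**2 - p2**2 expands to
# -x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8, which is free of sqrt(2).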
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
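# Worked instance of the resultant step for op = Add, ex1 = sqrt(2), ex2 = sqrt(3):
# mp1 = x**2 - 2, mp2 = y**2 - 3, mp1a = (x - y)**2 - 2, and
#
#     resultant(mp1a, mp2, gens=[y, x])  ->  x**4 - 10*x**2 + 1
#
# which is irreducible, so _choose_factor returns it as the minimal polynomial
# of sqrt(2) + sqrt(3).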
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomial of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm, using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
# MASKED: minpoly_groebner function (lines 599-681)
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
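# Trace for the doctest above, ex = sqrt(2) + 1: bottom_up_scan introduces a single
# dummy a for sqrt(2) with mapping[sqrt(2)] = a**2 - 2, so F = [x - (a + 1), a**2 - 2];
# the lex Groebner basis eliminates a and its last element is x**2 - 2*x - 1.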
| 599 | 681 |
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
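# Example for the negative-exponent branch: with ex = 1 + sqrt(2) and pw = -1,
# mp = x**2 - 2*x - 1 is reversed by _invertx and the value returned is a scalar
# multiple of x**2 + 2*x - 1, the minimal polynomial of 1/(1 + sqrt(2)) = sqrt(2) - 1.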
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
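# Example for the prime-denominator branch: exp(I*pi/5) has q = 5, so the loop
# returns 1 - x + x**2 - x**3 + x**4 (the 10th cyclotomic polynomial), which
# indeed vanishes at exp(I*pi/5).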
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
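# Example for the Mul branch with rational bases raised to rational powers:
# ex = sqrt(2)*cbrt(3) gives r[True] = [(2, 1/2), (3, 1/3)], so lcmdens = 6,
# ex2 = 2**3 * 3**2 = 72 with mp2 = x**6 - 72, and the resulting minimal
# polynomial of sqrt(2)*cbrt(3) is x**6 - 72.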
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomial of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm, using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
                         'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
|
add
|
increments counters for the sum of log probs of the current word and the next
word (given context ending at the current word). Since the next word might be at the end of the example,
or it might not be counted because it is not an ending subword unit,
this also keeps track of how many of those we have seen
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq import distributed_utils
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
# MASKED: add function (lines 42-52)
def __str__(self):
return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe,
self.next_word_prob, self.count - self.missing_next_words)
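    # Usage sketch: one WordStat is kept per vocabulary word and updated as scored
    # tokens stream by; `add` receives the word's log prob and, when available, the
    # log prob of the following word:
    #
    #     ws = WordStat('the', is_bpe=False)
    #     ws.add(-2.3, -1.1)   # next-word probability observed
    #     ws.add(-2.0, None)   # next word missing (e.g. end of example)
    #     str(ws)              # tab-separated summary used for --output-word-stats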
def main(parsed_args, **unused_kwargs):
assert parsed_args.path is not None, '--path required for evaluation!'
if torch.cuda.is_available() and not parsed_args.cpu:
torch.cuda.set_device(parsed_args.device_id)
utils.import_user_module(parsed_args)
logger.info(parsed_args)
    if parsed_args.ipex:
        import intel_pytorch_extension as ipex
        # `args` is only assigned further below, so read these flags from parsed_args
        if parsed_args.dnnl:
            ipex.core.enable_auto_dnnl()
        else:
            ipex.core.disable_auto_dnnl()
        if parsed_args.mix_precision:
            ipex.core.enable_mix_bf16_fp32()
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
logger.info('loading model(s) from {}'.format(parsed_args.path))
models, args = checkpoint_utils.load_model_ensemble(
parsed_args.path.split(os.pathsep),
arg_overrides=eval(parsed_args.model_overrides),
task=task,
suffix=getattr(parsed_args, "checkpoint_suffix", ""),
)
for arg in vars(parsed_args).keys():
if arg not in {
'self_target', 'future_target', 'past_target', 'tokens_per_sample',
'output_size_dictionary', 'add_bos_token',
}:
setattr(args, arg, getattr(parsed_args, arg))
# reduce tokens per sample by the required context window size
args.tokens_per_sample -= args.context_window
task = tasks.setup_task(args)
# Load dataset splits
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
if args.context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=args.tokens_per_sample,
context_window=args.context_window,
pad_idx=task.source_dictionary.pad(),
)
logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
if args.ipex:
model = model.to(device = ipex.DEVICE)
assert len(models) > 0
logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
ignore_invalid_inputs=True,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
score_sum = 0.
count = 0
if args.remove_bpe is not None:
if args.remove_bpe == 'sentencepiece':
raise NotImplementedError
else:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = {
i
for i in range(len(task.source_dictionary))
if task.source_dictionary[i].endswith(bpe_cont)
}
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
wps_meter = TimeMeter()
for sample in progress:
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = utils.move_to_ipex(sample) if args.ipex else sample
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample['ntokens'])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample['id'][i]
tokens = hypo['tokens']
tgt_len = tokens.numel()
pos_scores = hypo['positional_scores'].float()
if args.add_bos_token:
assert hypo['tokens'][0].item() == task.target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
                logger.info(
                    'skipping tokens with inf scores: %s',
                    task.target_dictionary.string(tokens[inf_scores.nonzero()])
                )
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += task.source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
is_bpe = False
w = ''
if args.output_word_probs:
logger.info(
str(int(sample_id)) + " "
+ ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
)
wps_meter.update(sample['ntokens'])
progress.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2
logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
gen_timer.n, gen_timer.sum, 1. / gen_timer.avg
))
logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(
avg_nll_loss, 2**avg_nll_loss
))
if args.output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
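    # Worked numbers for the two log lines above: if score_sum == -6931.47 (nats)
    # over count == 1000 scored tokens, then avg_nll_loss = 6931.47/1000/ln(2)
    # ~= 10.0 bits per token and the reported perplexity is 2**10.0 = 1024.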
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
|
def add(self, log_prob, next_word_prob):
""" increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen """
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
| 42 | 52 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq import distributed_utils
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
""" increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen """
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe,
self.next_word_prob, self.count - self.missing_next_words)
def main(parsed_args, **unused_kwargs):
assert parsed_args.path is not None, '--path required for evaluation!'
if torch.cuda.is_available() and not parsed_args.cpu:
torch.cuda.set_device(parsed_args.device_id)
utils.import_user_module(parsed_args)
logger.info(parsed_args)
if parsed_args.ipex:
import intel_pytorch_extension as ipex
if args.dnnl:
ipex.core.enable_auto_dnnl()
else:
ipex.core.disable_auto_dnnl()
if args.mix_precision:
ipex.core.enable_mix_bf16_fp32()
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
logger.info('loading model(s) from {}'.format(parsed_args.path))
models, args = checkpoint_utils.load_model_ensemble(
parsed_args.path.split(os.pathsep),
arg_overrides=eval(parsed_args.model_overrides),
task=task,
suffix=getattr(parsed_args, "checkpoint_suffix", ""),
)
for arg in vars(parsed_args).keys():
if arg not in {
'self_target', 'future_target', 'past_target', 'tokens_per_sample',
'output_size_dictionary', 'add_bos_token',
}:
setattr(args, arg, getattr(parsed_args, arg))
# reduce tokens per sample by the required context window size
args.tokens_per_sample -= args.context_window
task = tasks.setup_task(args)
# Load dataset splits
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
if args.context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=args.tokens_per_sample,
context_window=args.context_window,
pad_idx=task.source_dictionary.pad(),
)
logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.prepare_for_inference_(args)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
if args.ipex:
model = model.to(device = ipex.DEVICE)
assert len(models) > 0
logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
ignore_invalid_inputs=True,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
score_sum = 0.
count = 0
if args.remove_bpe is not None:
if args.remove_bpe == 'sentencepiece':
raise NotImplementedError
else:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = {
i
for i in range(len(task.source_dictionary))
if task.source_dictionary[i].endswith(bpe_cont)
}
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
wps_meter = TimeMeter()
for sample in progress:
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = utils.move_to_ipex(sample) if args.ipex else sample
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample['ntokens'])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample['id'][i]
tokens = hypo['tokens']
tgt_len = tokens.numel()
pos_scores = hypo['positional_scores'].float()
if args.add_bos_token:
assert hypo['tokens'][0].item() == task.target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
logger.info(
'skipping tokens with inf scores:',
task.target_dictionary.string(tokens[inf_scores.nonzero()])
)
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += task.source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
is_bpe = False
w = ''
if args.output_word_probs:
logger.info(
str(int(sample_id)) + " "
+ ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
)
wps_meter.update(sample['ntokens'])
progress.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2
logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
gen_timer.n, gen_timer.sum, 1. / gen_timer.avg
))
logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(
avg_nll_loss, 2**avg_nll_loss
))
if args.output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
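# Hedged worked example (not part of the original script) of how the metrics
# reported in main() above follow from the accumulated quantities, using
# made-up numbers:
#
#   score_sum = -6931.47   # summed natural-log probabilities over all tokens
#   count = 10000          # number of scored tokens (BPE continuations folded in)
#   avg_nll_loss = -(-6931.47) / 10000 / math.log(2)   # ~1.0 bit per token
#   perplexity = 2 ** avg_nll_loss                     # ~2.0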
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
|
__init__
|
Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
      been imported before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Importer definition."""
from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata.proto import metadata_store_pb2
# Constant to access importer importing result from importer output dict.
IMPORT_RESULT_KEY = 'result'
# Constant to access artifact uri from importer exec_properties dict.
SOURCE_URI_KEY = 'artifact_uri'
# Constant to access re-import option from importer exec_properties dict.
REIMPORT_OPTION_KEY = 'reimport'
def _set_artifact_properties(artifact: types.Artifact,
properties: Optional[Dict[str, Any]],
custom_properties: Optional[Dict[str, Any]]):
"""Sets properties and custom_properties to the given artifact."""
if properties is not None:
for key, value in properties.items():
setattr(artifact, key, value)
if custom_properties is not None:
for key, value in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(
f'Unexpected custom_property value type:{type(value)}')
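# Hedged usage sketch for _set_artifact_properties (illustrative only;
# `standard_artifacts.Schema` and the property values are assumptions, not part
# of this module):
#
#   schema = standard_artifacts.Schema()
#   _set_artifact_properties(
#       schema,
#       properties=None,
#       custom_properties={'version': 3, 'source': 'manual-upload'})
#   # schema.get_int_custom_property('version') == 3
#   # schema.get_string_custom_property('source') == 'manual-upload'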
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, str, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
uri)
  # Only consider previous artifacts as candidates for reuse if the properties
  # of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
_set_artifact_properties(result, properties, custom_properties)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result
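# Note on the reuse rule above (hedged restatement, no behavior change): when
# several matching artifacts already exist for the same URI, the one with the
# largest MLMD id (the most recently registered) is reused; reimport=True skips
# reuse and registers a brand-new artifact instead.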
def generate_output_dict(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool,
output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None
) -> Dict[str, List[types.Artifact]]:
"""Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
a dictionary with the only key `result` whose value is the Artifact.
"""
return {
IMPORT_RESULT_KEY: [
_prepare_artifact(
metadata_handler,
uri=uri,
properties=properties,
custom_properties=custom_properties,
output_artifact_class=output_artifact_class,
mlmd_artifact_type=mlmd_artifact_type,
reimport=reimport)
]
}
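# Hedged usage sketch for generate_output_dict (the connection handle, URI and
# `standard_artifacts.Schema` below are illustrative assumptions):
#
#   with metadata.Metadata(connection_config) as m:
#     outputs = generate_output_dict(
#         m,
#         uri='/data/schemas/v1',
#         properties={},
#         custom_properties={},
#         reimport=False,
#         output_artifact_class=standard_artifacts.Schema)
#   # outputs == {'result': [<Schema artifact with uri '/data/schemas/v1'>]}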
class ImporterDriver(base_driver.BaseDriver):
"""Driver for Importer."""
def pre_execution(
self,
input_dict: Dict[str, types.Channel],
output_dict: Dict[str, types.Channel],
exec_properties: Dict[str, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution.
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Create imported artifacts.
output_channel = output_dict[IMPORT_RESULT_KEY]
output_artifacts = generate_output_dict(
self._metadata_handler,
uri=exec_properties[SOURCE_URI_KEY],
properties=output_channel.additional_properties,
custom_properties=output_channel.additional_custom_properties,
reimport=exec_properties[REIMPORT_OPTION_KEY],
output_artifact_class=output_channel.type)
# Update execution with imported artifacts.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=output_artifacts,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
output_dict[IMPORT_RESULT_KEY] = channel_utils.as_channel(
output_artifacts[IMPORT_RESULT_KEY])
return data_types.ExecutionDecision(
input_dict={},
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=False)
class Importer(base_node.BaseNode):
"""Definition for TFX Importer.
The Importer is a special TFX node which registers an external resource into
MLMD so that downstream nodes can use the registered artifact as an input.
Here is an example to use the Importer:
```
importer = Importer(
source_uri='uri/to/schema',
artifact_type=standard_artifacts.Schema,
reimport=False).with_id('import_schema')
schema_gen = SchemaGen(
fixed_schema=importer.outputs['result'],
examples=...)
```
"""
# MASKED: __init__ function (lines 247-282)
@property
@doc_controls.do_not_generate_docs
def inputs(self) -> Dict[str, Any]:
return {}
@property
def outputs(self) -> Dict[str, Any]:
"""Output Channel dict that contains imported artifacts."""
return self._output_dict
@property
@doc_controls.do_not_generate_docs
def exec_properties(self) -> Dict[str, Any]:
return {
SOURCE_URI_KEY: self._source_uri,
REIMPORT_OPTION_KEY: int(self._reimport),
}
|
def __init__(self,
source_uri: str,
artifact_type: Type[types.Artifact],
reimport: Optional[bool] = False,
properties: Optional[Dict[str, Union[str, int]]] = None,
custom_properties: Optional[Dict[str, Union[str, int]]] = None):
"""Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
        been imported before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
"""
self._source_uri = source_uri
self._reimport = reimport
artifact = artifact_type()
_set_artifact_properties(artifact, properties, custom_properties)
# TODO(b/161490287): remove static artifacts.
self._output_dict = {
IMPORT_RESULT_KEY:
types.Channel(
type=artifact_type,
additional_properties=properties,
additional_custom_properties=custom_properties).set_artifacts(
[artifact])
}
super().__init__(driver_class=ImporterDriver)
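  # Hedged sketch of how the attributes set above surface through the public
  # API (`standard_artifacts.Schema` and the values are illustrative):
  #
  #   importer = Importer(
  #       source_uri='/data/schemas/v1',
  #       artifact_type=standard_artifacts.Schema,
  #       custom_properties={'version': 3})
  #   importer.exec_properties    # {'artifact_uri': '/data/schemas/v1', 'reimport': 0}
  #   importer.outputs['result']  # Channel of type Schema wrapping the static artifact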
| 247 | 282 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Importer definition."""
from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata.proto import metadata_store_pb2
# Constant to access importer importing result from importer output dict.
IMPORT_RESULT_KEY = 'result'
# Constant to access artifact uri from importer exec_properties dict.
SOURCE_URI_KEY = 'artifact_uri'
# Constant to access re-import option from importer exec_properties dict.
REIMPORT_OPTION_KEY = 'reimport'
def _set_artifact_properties(artifact: types.Artifact,
properties: Optional[Dict[str, Any]],
custom_properties: Optional[Dict[str, Any]]):
"""Sets properties and custom_properties to the given artifact."""
if properties is not None:
for key, value in properties.items():
setattr(artifact, key, value)
if custom_properties is not None:
for key, value in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(
f'Unexpected custom_property value type:{type(value)}')
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, str, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
uri)
  # Only consider previous artifacts as candidates for reuse if the properties
  # of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
_set_artifact_properties(result, properties, custom_properties)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result
def generate_output_dict(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool,
output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None
) -> Dict[str, List[types.Artifact]]:
"""Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
a dictionary with the only key `result` whose value is the Artifact.
"""
return {
IMPORT_RESULT_KEY: [
_prepare_artifact(
metadata_handler,
uri=uri,
properties=properties,
custom_properties=custom_properties,
output_artifact_class=output_artifact_class,
mlmd_artifact_type=mlmd_artifact_type,
reimport=reimport)
]
}
class ImporterDriver(base_driver.BaseDriver):
"""Driver for Importer."""
def pre_execution(
self,
input_dict: Dict[str, types.Channel],
output_dict: Dict[str, types.Channel],
exec_properties: Dict[str, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution.
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Create imported artifacts.
output_channel = output_dict[IMPORT_RESULT_KEY]
output_artifacts = generate_output_dict(
self._metadata_handler,
uri=exec_properties[SOURCE_URI_KEY],
properties=output_channel.additional_properties,
custom_properties=output_channel.additional_custom_properties,
reimport=exec_properties[REIMPORT_OPTION_KEY],
output_artifact_class=output_channel.type)
# Update execution with imported artifacts.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=output_artifacts,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
output_dict[IMPORT_RESULT_KEY] = channel_utils.as_channel(
output_artifacts[IMPORT_RESULT_KEY])
return data_types.ExecutionDecision(
input_dict={},
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=False)
class Importer(base_node.BaseNode):
"""Definition for TFX Importer.
The Importer is a special TFX node which registers an external resource into
MLMD so that downstream nodes can use the registered artifact as an input.
Here is an example to use the Importer:
```
importer = Importer(
source_uri='uri/to/schema',
artifact_type=standard_artifacts.Schema,
reimport=False).with_id('import_schema')
schema_gen = SchemaGen(
fixed_schema=importer.outputs['result'],
examples=...)
```
"""
def __init__(self,
source_uri: str,
artifact_type: Type[types.Artifact],
reimport: Optional[bool] = False,
properties: Optional[Dict[str, Union[str, int]]] = None,
custom_properties: Optional[Dict[str, Union[str, int]]] = None):
"""Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
        been imported before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
"""
self._source_uri = source_uri
self._reimport = reimport
artifact = artifact_type()
_set_artifact_properties(artifact, properties, custom_properties)
# TODO(b/161490287): remove static artifacts.
self._output_dict = {
IMPORT_RESULT_KEY:
types.Channel(
type=artifact_type,
additional_properties=properties,
additional_custom_properties=custom_properties).set_artifacts(
[artifact])
}
super().__init__(driver_class=ImporterDriver)
@property
@doc_controls.do_not_generate_docs
def inputs(self) -> Dict[str, Any]:
return {}
@property
def outputs(self) -> Dict[str, Any]:
"""Output Channel dict that contains imported artifacts."""
return self._output_dict
@property
@doc_controls.do_not_generate_docs
def exec_properties(self) -> Dict[str, Any]:
return {
SOURCE_URI_KEY: self._source_uri,
REIMPORT_OPTION_KEY: int(self._reimport),
}
|
__call__
|
Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
    representing box tensors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
    num_anchors * num_classes]`.
  anchor_boxes: A `tf.Tensor` of shape [batch_size, K, 4] representing
    the corresponding anchor boxes w.r.t `box_outputs`.
  image_shape: A `tf.Tensor` of shape [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
    contains keys representing FPN levels and values representing tensors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
    `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
      `num_detections` boxes are valid detections.
    `detection_attributes`: A dict. Each value of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
      [batch, num_raw_boxes] representing scores of all the decoded boxes.
    `decoded_box_attributes`: A dict. Each value in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of generators to generate the final detections."""
import contextlib
from typing import List, Optional, Mapping
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import nms
from official.vision.beta.ops import preprocess_ops
def _generate_detections_v1(boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str,
tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections given the model outputs.
  The implementation unrolls the batch dimension and processes images one by
  one. It requires the batch dimension to be statically known and is TPU
  compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape
`[batch_size, N, num_classes, attribute_size]` or
`[batch_size, N, 1, attribute_size]` for attribute predictions on all
feature levels. The N is the number of total anchors on all levels. Can
be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
    valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]`; only the
top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required.
"""
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i,
nmsed_att_i) = _generate_detections_per_image(
boxes[i],
scores[i],
attributes={
att_name: att[i] for att_name, att in attributes.items()
} if attributes else {},
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
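# Hedged usage sketch for the v1 path (shapes, thresholds and values below are
# illustrative assumptions):
#
#   boxes = tf.zeros([8, 1000, 1, 4])          # class-agnostic box predictions
#   scores = tf.random.uniform([8, 1000, 90])  # per-class probabilities
#   nmsed_boxes, nmsed_scores, nmsed_classes, valid, _ = _generate_detections_v1(
#       boxes, scores, pre_nms_top_k=1000, max_num_detections=100)
#   # nmsed_boxes: [8, 100, 4]; nmsed_scores, nmsed_classes: [8, 100]; valid: [8]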
def _generate_detections_per_image(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
      stacks box predictions on all feature levels. The N is the number of total
anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
`[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
attribute predictions on all feature levels. The N is the number of total
anchors on all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
If set to None, `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [1]; only the top
`valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None.
"""
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
scores_i = scores[:, i]
# Obtains pre_nms_top_k before running NMS.
scores_i, indices = tf.nn.top_k(
scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if soft_nms_sigma is not None:
(nmsed_indices_i,
nmsed_scores_i) = tf.image.non_max_suppression_with_scores(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
soft_nms_sigma=soft_nms_sigma,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_scores_i, max_num_detections, -1.0)
else:
(nmsed_indices_i,
nmsed_num_valid_i) = tf.image.non_max_suppression_padded(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_to_max_output_size=True,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
# Sets scores of invalid boxes to -1.
nmsed_scores_i = tf.where(
tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
nmsed_scores_i, -tf.ones_like(nmsed_scores_i))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for att_name, att in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min(num_classes_for_attr - 1, i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
  # Concatenates results from all classes and sorts them.
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(
tf.cast(tf.greater(nmsed_scores, -1), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name],
indices)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
"""Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: A `tf.Tensor` with shape
`[batch_size, pre_nms_num_detections, num_classes]`.
"""
batch_size, num_anchors, num_class = scores_in.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[batch_size, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores,
[0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])
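# Hedged shape sketch (values are illustrative, not from the original file):
#
#   scores_in = tf.random.uniform([2, 1000, 5])   # [batch, anchors, classes]
#   top_scores, top_idx = _select_top_k_scores(scores_in, 100)
#   # top_scores.shape == top_idx.shape == [2, 100, 5]; scores are sorted
#   # descending per class and top_idx holds the originating anchor indices.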
def _generate_detections_v2(boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100):
"""Generates the final detections given the model outputs.
  This implementation unrolls the classes dimension while using tf.while_loop
  to implement the batched NMS, so that it can be parallelized over the batch
  dimension. It should give better performance compared to the v1 implementation.
It is TPU compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, num_classes = scores.get_shape().as_list()
# Selects top pre_nms_num scores and indices before NMS.
scores, indices = _select_top_k_scores(
scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
scores_i = scores[:, :, i]
# Obtains pre_nms_top_k before running NMS.
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
      # Filters out boxes whose scores are below the threshold.
boxes_i, scores_i = box_ops.filter_boxes_by_scores(
boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
tf.cast(scores_i, tf.float32),
tf.cast(boxes_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(
input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor,
pre_nms_score_threshold: float,
nms_iou_threshold: float,
max_num_detections: int):
"""Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
  generates detected boxes. Note that this uses batched NMS, which is not
  currently supported on TPU.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=max_num_detections,
max_total_size=max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False))
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
"""Generates the final detected boxes with scores and classes."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v2',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string, one of `batched`, `v1` or `v2`, specifying the NMS version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(DetectionGenerator, self).__init__(**kwargs)
def __call__(self,
raw_boxes: tf.Tensor,
raw_scores: tf.Tensor,
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
regression_weights: Optional[List[float]] = None,
bbox_per_class: bool = True):
"""Generates final detections.
Args:
      raw_boxes: A `tf.Tensor` of shape `[batch_size, K, num_classes * 4]`
        representing the class-specific box coordinates relative to anchors.
      raw_scores: A `tf.Tensor` of shape `[batch_size, K, num_classes]`
        representing the class logits before applying score activation.
      anchor_boxes: A `tf.Tensor` of shape `[batch_size, K, 4]` representing
        the corresponding anchor boxes w.r.t `box_outputs`.
      image_shape: A `tf.Tensor` of shape `[batch_size, 2]` storing the image
        height and width w.r.t. the scaled image, i.e. the same image space as
        `box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
      `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
        `num_detections` boxes are valid detections.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
"""
box_scores = tf.nn.softmax(raw_scores, axis=-1)
# Removes the background class.
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[-1]
box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
if bbox_per_class:
num_detections = num_locations * (num_classes - 1)
raw_boxes = tf.reshape(raw_boxes,
[batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Box decoding.
decoded_boxes = box_ops.decode_boxes(
raw_boxes, anchor_boxes, weights=regression_weights)
# Box clipping
decoded_boxes = box_ops.clip_boxes(
decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(
decoded_boxes, [batch_size, num_locations, num_classes - 1, 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': decoded_boxes,
'decoded_box_scores': box_scores,
}
    # Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
decoded_boxes, box_scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
_generate_detections_v1(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
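# Hedged usage sketch for DetectionGenerator (the input tensors below are
# illustrative placeholders, not values produced by this module):
#
#   generator = DetectionGenerator(nms_version='v2', max_num_detections=100)
#   detections = generator(
#       raw_boxes,      # [batch, K, num_classes * 4] regression outputs
#       raw_scores,     # [batch, K, num_classes] logits; softmax applied inside
#       anchor_boxes,   # [batch, K, 4] anchors matching raw_boxes
#       image_shape)    # [batch, 2] scaled image heights and widths
#   # detections['detection_boxes'] has shape [batch, 100, 4] and
#   # detections['num_detections'] gives the number of valid rows per image.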
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v1',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a multi-level detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string, one of `batched`, `v1` or `v2`, specifying the NMS version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(MultilevelDetectionGenerator, self).__init__(**kwargs)
def _decode_multilevel_outputs(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Collects dict of multilevel boxes, scores, attributes into lists."""
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, max_level + 1):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i,
num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = feature_h_i * feature_w_i
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
num_classes = raw_scores_i.get_shape().as_list(
)[-1] // num_anchors_per_locations
      # Applies score transformation and removes the implicit background class.
scores_i = tf.sigmoid(
tf.reshape(raw_scores_i, [
batch_size, num_locations * num_anchors_per_locations, num_classes
]))
scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
# Box decoding.
# The anchor boxes are shared for all data in a batch.
      # A one-stage detector only supports class-agnostic box regression.
anchor_boxes_i = tf.reshape(
anchor_boxes[str(i)],
[batch_size, num_locations * num_anchors_per_locations, 4])
raw_boxes_i = tf.reshape(
raw_boxes_i,
[batch_size, num_locations * num_anchors_per_locations, 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
# Box clipping.
boxes_i = box_ops.clip_boxes(
boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for att_name, raw_att in raw_attributes.items():
attribute_size = raw_att[str(
i)].get_shape().as_list()[-1] // num_anchors_per_locations
att_i = tf.reshape(raw_att[str(i)], [
batch_size, num_locations * num_anchors_per_locations,
attribute_size
])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return boxes, scores, attributes
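  # Hedged shape sketch (illustrative): with three FPN levels contributing
  # 1600, 400 and 100 anchors for a 91-class head (including background), the
  # concatenated outputs above would be
  #   boxes:  [batch, 2100, 1, 4]
  #   scores: [batch, 2100, 90]   # background column removed
  # and each attribute entry [batch, 2100, 1, attribute_size].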
# MASKED: __call__ function (lines 729-845)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
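# Hedged usage sketch for MultilevelDetectionGenerator (the level keys, shapes
# and inputs below are illustrative assumptions):
#
#   generator = MultilevelDetectionGenerator(nms_version='v1')
#   detections = generator(
#       raw_boxes={'3': ..., '5': ...},    # per level: [batch, h, w, anchors * 4]
#       raw_scores={'3': ..., '5': ...},   # per level: [batch, h, w, anchors * classes]
#       anchor_boxes=anchor_boxes,         # per-level anchors with the same keys
#       image_shape=image_shape)           # [batch, 2]
#   # With apply_nms=True the result holds 'detection_boxes', 'detection_scores',
#   # 'detection_classes', 'num_detections' and 'detection_attributes'.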
|
def __call__(self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
        num_anchors * num_classes]`.
      anchor_boxes: A `tf.Tensor` of shape [batch_size, K, 4] representing
        the corresponding anchor boxes w.r.t `box_outputs`.
      image_shape: A `tf.Tensor` of shape [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
        contains keys representing FPN levels and values representing tensors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
        `detection_attributes`: A dict. Each value of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
        `decoded_box_attributes`: A dict. Each value in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
"""
boxes, scores, attributes = self._decode_multilevel_outputs(
raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': boxes,
'decoded_box_scores': scores,
'decoded_box_attributes': attributes,
}
# Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
raise ValueError(
'Attribute learning is only supported for NMSv1 but NMS {} is used.'
.format(self._config_dict['nms_version']))
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
boxes, scores, self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
        # Batched NMS does not support attributes; use an empty dict.
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
nmsed_attributes) = (
_generate_detections_v1(
boxes,
scores,
attributes=attributes if raw_attributes else None,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
boxes,
scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
        # NMS v2 does not support attributes; use an empty dict.
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
'detection_attributes': nmsed_attributes,
}
| 729 | 845 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of generators to generate the final detections."""
import contextlib
from typing import List, Optional, Mapping
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import nms
from official.vision.beta.ops import preprocess_ops
def _generate_detections_v1(boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str,
tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections given the model outputs.
  The implementation unrolls the batch dimension and processes images one by
  one. It requires the batch dimension to be statically known and it is TPU
  compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape
`[batch_size, N, num_classes, attribute_size]` or
`[batch_size, N, 1, attribute_size]` for attribute predictions on all
feature levels. The N is the number of total anchors on all levels. Can
be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
      When soft_nms_sigma=0.0, we fall back to standard NMS. If set to None
      (the default), `tf.image.non_max_suppression_padded` is used instead.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
    valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]`; only
      the top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required.
"""
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i,
nmsed_att_i) = _generate_detections_per_image(
boxes[i],
scores[i],
attributes={
att_name: att[i] for att_name, att in attributes.items()
} if attributes else {},
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _generate_detections_per_image(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
      stacks box predictions on all feature levels. The N is the number of
      total anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
`[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
attribute predictions on all feature levels. The N is the number of total
anchors on all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
If set to None, `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [1]; only the top
      `valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None.
"""
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
scores_i = scores[:, i]
# Obtains pre_nms_top_k before running NMS.
scores_i, indices = tf.nn.top_k(
scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if soft_nms_sigma is not None:
(nmsed_indices_i,
nmsed_scores_i) = tf.image.non_max_suppression_with_scores(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
soft_nms_sigma=soft_nms_sigma,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_scores_i, max_num_detections, -1.0)
else:
(nmsed_indices_i,
nmsed_num_valid_i) = tf.image.non_max_suppression_padded(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_to_max_output_size=True,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
# Sets scores of invalid boxes to -1.
nmsed_scores_i = tf.where(
tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
nmsed_scores_i, -tf.ones_like(nmsed_scores_i))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for att_name, att in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min(num_classes_for_attr - 1, i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
  # Concats results from all classes and sorts them.
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(
tf.cast(tf.greater(nmsed_scores, -1), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name],
indices)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
"""Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
    scores and indices: Two `tf.Tensor`s, each with shape
      `[batch_size, pre_nms_num_detections, num_classes]`, holding the top-k
      scores and the corresponding anchor indices for each class.
"""
batch_size, num_anchors, num_class = scores_in.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[batch_size, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores,
[0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])
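# Illustrative sketch (not part of the original library): `_select_top_k_scores`
# transposes the scores to [batch, num_classes, N], takes the top-k anchors per
# class, and transposes back, so both outputs have shape
# [batch, pre_nms_num_detections, num_classes]. The helper below only
# demonstrates the expected shapes with random inputs.
def _select_top_k_scores_shape_example():
  """Hypothetical helper, for illustration only."""
  scores = tf.random.uniform([2, 1000, 5])  # [batch, anchors, num_classes]
  top_scores, top_indices = _select_top_k_scores(scores, 50)
  # Both outputs are [2, 50, 5]; top_indices[b, :, c] indexes the anchor axis.
  return top_scores.shape, top_indices.shape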
def _generate_detections_v2(boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100):
"""Generates the final detections given the model outputs.
  This implementation unrolls the classes dimension while using the
  tf.while_loop to implement the batched NMS, so that it can be parallelized at
  the batch dimension. It should give better performance compared to the v1
  implementation.
It is TPU compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
      `valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, num_classes = scores.get_shape().as_list()
# Selects top pre_nms_num scores and indices before NMS.
scores, indices = _select_top_k_scores(
scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
scores_i = scores[:, :, i]
# Obtains pre_nms_top_k before running NMS.
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
      # Filter out boxes whose scores are below the pre-NMS score threshold.
boxes_i, scores_i = box_ops.filter_boxes_by_scores(
boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
tf.cast(scores_i, tf.float32),
tf.cast(boxes_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(
input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor,
pre_nms_score_threshold: float,
nms_iou_threshold: float,
max_num_detections: int):
"""Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
  generates detected boxes. Note that this uses batched NMS, which is not
  currently supported on TPU.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
      `valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=max_num_detections,
max_total_size=max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False))
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
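# Illustrative sketch (not part of the original library): minimal shapes for the
# batched generator above. `boxes` may be class-agnostic ([batch, N, 1, 4]) and
# is broadcast against the per-class scores by
# `tf.image.combined_non_max_suppression`. Values here are random placeholders.
def _generate_detections_batched_example():
  """Hypothetical helper, for illustration only."""
  boxes = tf.random.uniform([2, 100, 1, 4])  # [batch, anchors, 1, 4]
  scores = tf.random.uniform([2, 100, 3])    # [batch, anchors, num_classes]
  nmsed_boxes, nmsed_scores, nmsed_classes, valid = (
      _generate_detections_batched(
          boxes,
          scores,
          pre_nms_score_threshold=0.05,
          nms_iou_threshold=0.5,
          max_num_detections=10))
  # nmsed_boxes: [2, 10, 4]; nmsed_scores/nmsed_classes: [2, 10]; valid: [2].
  return nmsed_boxes, nmsed_scores, nmsed_classes, valid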
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
"""Generates the final detected boxes with scores and classes."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v2',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string of `batched`, `v1` or `v2` specifying the NMS version.
      use_cpu_nms: A `bool` of whether or not to force NMS to run on the CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(DetectionGenerator, self).__init__(**kwargs)
def __call__(self,
raw_boxes: tf.Tensor,
raw_scores: tf.Tensor,
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
regression_weights: Optional[List[float]] = None,
bbox_per_class: bool = True):
"""Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
        representing the class logits before applying score activation.
anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
"""
box_scores = tf.nn.softmax(raw_scores, axis=-1)
# Removes the background class.
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[-1]
box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
if bbox_per_class:
num_detections = num_locations * (num_classes - 1)
raw_boxes = tf.reshape(raw_boxes,
[batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Box decoding.
decoded_boxes = box_ops.decode_boxes(
raw_boxes, anchor_boxes, weights=regression_weights)
# Box clipping
decoded_boxes = box_ops.clip_boxes(
decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(
decoded_boxes, [batch_size, num_locations, num_classes - 1, 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': decoded_boxes,
'decoded_box_scores': box_scores,
}
    # Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
decoded_boxes, box_scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
_generate_detections_v1(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
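# Illustrative usage sketch (not part of the original library): how the layer
# above is typically driven. With `bbox_per_class=True` (the default),
# `raw_boxes` carries `num_classes * 4` regression values per location, where
# `num_classes` includes the background class at index 0. Shapes and values
# below are made-up placeholders.
def _detection_generator_usage_example():
  """Hypothetical helper, for illustration only."""
  generator = DetectionGenerator(nms_version='v2', max_num_detections=10)
  batch, num_locations, num_classes = 2, 100, 4  # 4 classes incl. background
  raw_boxes = tf.random.uniform([batch, num_locations, num_classes * 4])
  raw_scores = tf.random.uniform([batch, num_locations, num_classes])
  anchor_boxes = tf.tile(
      tf.constant([[[10.0, 10.0, 50.0, 50.0]]]), [batch, num_locations, 1])
  image_shape = tf.constant([[640.0, 640.0]] * batch)
  outputs = generator(raw_boxes, raw_scores, anchor_boxes, image_shape)
  # `outputs['detection_boxes']` is [2, 10, 4]; `outputs['num_detections']` is [2].
  # The config round-trips through the Keras serialization hooks defined above.
  restored = DetectionGenerator.from_config(generator.get_config())
  return outputs, restored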
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v1',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a multi-level detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string of `batched`, `v1` or `v2` specifying the NMS version.
      use_cpu_nms: A `bool` of whether or not to force NMS to run on the CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(MultilevelDetectionGenerator, self).__init__(**kwargs)
def _decode_multilevel_outputs(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
      anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Collects dict of multilevel boxes, scores, attributes into lists."""
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, max_level + 1):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i,
num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = feature_h_i * feature_w_i
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
num_classes = raw_scores_i.get_shape().as_list(
)[-1] // num_anchors_per_locations
      # Applies score transformation and removes the implicit background class.
scores_i = tf.sigmoid(
tf.reshape(raw_scores_i, [
batch_size, num_locations * num_anchors_per_locations, num_classes
]))
scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
# Box decoding.
# The anchor boxes are shared for all data in a batch.
# One stage detector only supports class agnostic box regression.
anchor_boxes_i = tf.reshape(
anchor_boxes[str(i)],
[batch_size, num_locations * num_anchors_per_locations, 4])
raw_boxes_i = tf.reshape(
raw_boxes_i,
[batch_size, num_locations * num_anchors_per_locations, 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
# Box clipping.
boxes_i = box_ops.clip_boxes(
boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for att_name, raw_att in raw_attributes.items():
attribute_size = raw_att[str(
i)].get_shape().as_list()[-1] // num_anchors_per_locations
att_i = tf.reshape(raw_att[str(i)], [
batch_size, num_locations * num_anchors_per_locations,
attribute_size
])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return boxes, scores, attributes
def __call__(self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
               anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
num_anchors]`.
      anchor_boxes: A `dict` with keys representing FPN levels and values
        representing `tf.Tensor`s of shape `[batch, feature_h * feature_w *
        num_anchors, 4]` for the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
        contains keys representing FPN levels and values representing tensors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
        `detection_attributes`: A dict. Values of the dict are `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
        `decoded_box_attributes`: A dict. Values in the dict are
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
"""
boxes, scores, attributes = self._decode_multilevel_outputs(
raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': boxes,
'decoded_box_scores': scores,
'decoded_box_attributes': attributes,
}
# Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
raise ValueError(
'Attribute learning is only supported for NMSv1 but NMS {} is used.'
.format(self._config_dict['nms_version']))
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
boxes, scores, self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
        # Set `nmsed_attributes` to an empty dict for batched NMS.
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
nmsed_attributes) = (
_generate_detections_v1(
boxes,
scores,
attributes=attributes if raw_attributes else None,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
boxes,
scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
        # Set `nmsed_attributes` to an empty dict for v2.
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
'detection_attributes': nmsed_attributes,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
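# Illustrative usage sketch (not part of the original library): the multilevel
# generator above consumes per-FPN-level dicts keyed by the level as a string.
# Shapes and values below are made-up placeholders with one anchor per location
# and three classes (including the implicit background at index 0).
def _multilevel_detection_generator_usage_example():
  """Hypothetical helper, for illustration only."""
  generator = MultilevelDetectionGenerator(nms_version='v2',
                                           max_num_detections=10)
  batch, num_classes = 2, 3
  raw_boxes, raw_scores, anchor_boxes = {}, {}, {}
  for level, size in [('3', 8), ('4', 4)]:
    raw_boxes[level] = tf.random.uniform([batch, size, size, 4])
    raw_scores[level] = tf.random.uniform([batch, size, size, num_classes])
    anchor_boxes[level] = tf.tile(
        tf.constant([[[10.0, 10.0, 50.0, 50.0]]]), [batch, size * size, 1])
  image_shape = tf.constant([[640.0, 640.0]] * batch)
  outputs = generator(raw_boxes, raw_scores, anchor_boxes, image_shape)
  # Detected classes are offset by +1 to account for the background class.
  return outputs['detection_boxes'], outputs['detection_classes']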
|
locate_index
|
Locate the start time index and end time index in a calendar under a certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
# MASKED: locate_index function (lines 59-98)
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
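# Illustrative sketch only -- this is NOT the library's masked `locate_index`
# implementation. It shows one way to realize the behaviour described in the
# `locate_index` docstring: snap `start_time` forward and `end_time` backward to
# the nearest timestamps in a sorted calendar and return both the timestamps and
# their indices. The helper name and the `calendar` argument are hypothetical.
def _locate_index_sketch(calendar, start_time, end_time):
    """Hypothetical helper, for illustration only.

    `calendar` is assumed to be a sorted sequence of pd.Timestamp.
    """
    start_time, end_time = pd.Timestamp(start_time), pd.Timestamp(end_time)
    start_index = bisect.bisect_left(calendar, start_time)  # first ts >= start_time
    end_index = bisect.bisect_right(calendar, end_time) - 1  # last ts <= end_time
    if start_index >= len(calendar) or end_index < 0 or start_index > end_index:
        raise IndexError("the requested time range is outside the calendar")
    return calendar[start_index], calendar[end_index], start_index, end_index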
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
        A wrong format of input instruments will lead to an exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
                # NOTE: requires Python >= 3.6; since Python 3.7 (and in CPython 3.6) dict preserves insertion order
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
        # NOTE: This is for Windows compatibility; multiprocessing on Windows uses `spawn`,
        # so the provider wrappers must be re-registered in each worker process.
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
# The client does not have expression provider, the data will be loaded from cache using static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
        data.index = _calendar[data.index.values.astype(int)]
data.index.names = ["datetime"]
if spans is None:
return data
else:
            mask = np.zeros(len(data), dtype=bool)
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
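# Small illustrative sketch (not part of the original library) of the `spans`
# masking used by `expression_calculator` above: rows outside an instrument's
# listed periods are dropped. Dates below are made up.
def _spans_mask_sketch():
    """Hypothetical helper, for illustration only."""
    index = pd.date_range("2020-01-01", "2020-01-10")
    data = pd.DataFrame({"feature": range(len(index))}, index=index)
    spans = [(pd.Timestamp("2020-01-02"), pd.Timestamp("2020-01-04"))]
    mask = np.zeros(len(data), dtype=bool)
    for begin, end in spans:
        mask |= (data.index >= begin) & (data.index <= end)
    return data[mask]  # only rows from 2020-01-02 to 2020-01-04 remain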
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
            # if the future calendar does not exist, fall back to the current calendar
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt not exists, return current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
"""Local instrument data provider class
Provide instrument data from local data source.
"""
def __init__(self):
pass
@property
def _uri_inst(self):
"""Instrument file uri."""
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
raise ValueError("instruments not exists for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
# `day`
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
# `1min`
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
# strip
# use calendar boundary
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
# filter
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
# as list
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
class LocalFeatureProvider(FeatureProvider):
"""Local feature data provider class
Provide feature data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
"""Static feature file uri."""
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
# validate
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
# raise ValueError('uri_data not found: ' + uri_data)
# load
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
"""Local expression data provider class
Provide expression data from local data source.
"""
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
# Ensure that each column type is consistent
        # FIXME: The stock data is currently float. If there are other types of data, this part needs to be re-implemented.
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
class LocalDatasetProvider(DatasetProvider):
"""Local dataset data provider class
Provide dataset data from local data source.
"""
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
If the expressions of one instrument haven't been calculated before,
        calculate them and write them into the expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
"""Client calendar data provider class
Provide calendar data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
"""Client instrument data provider class
Provide instrument data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
"""Client dataset data provider class
Provide dataset data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
"""
Call the server to generate the expression cache.
Then load the data from the expression cache directly.
- default using multi-kernel method.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
# - Done in callback
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
                # pre-mounted NFS, used for demo
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
"""Local provider class
    To keep compatibility with the old qlib provider.
"""
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
get_module_logger("Provider").warning(
"The instruments corresponds to a stock pool. "
"Parameters `start_time` and `end_time` does not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
"""
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
        This function will try the cache method, which has a `disk_cache` keyword;
        if a TypeError is raised (because the DatasetD instance is a provider class
        without that keyword), it falls back to the provider method.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
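# Illustrative usage sketch (not part of the original library): the typical way
# the provider stack above is consumed, assuming `qlib.init(provider_uri=...)`
# has been called so that the module-level wrappers defined below are registered
# and local data is available. Market, fields and dates are made-up examples.
def _features_usage_sketch():
    """Hypothetical helper, for illustration only."""
    instruments = D.instruments(market="csi300")
    # Returns a DataFrame indexed by <instrument, datetime>, one column per field.
    return D.features(
        instruments,
        ["$close", "Ref($close, 1)"],
        start_time="2019-01-01",
        end_time="2019-12-31",
        freq="day",
    )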
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
"""_uri
        The server expects to get the uri of the request. The uri is decided by
        the data provider; for example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
"""features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
"""
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
"""Client Provider
Requesting data from server as a client. Can propose requests:
- Calendar : Directly respond a list of calendars
- Instruments (without filter): Directly respond a list/dict of instruments
- Instruments (with filters): Respond a list/dict of instruments
- Features : Respond a cache uri
The general workflow is described as follows:
When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will then wait for the response. The response is made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
`BUG` : Every time we make a request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for the documentation of the python-socketio client.
"""
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
"""Data Provider Wrapper"""
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
raise AttributeError("Please run qlib.init() first using qlib")
return getattr(self._provider, key)
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
"""register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
"""
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
def register_all_wrappers():
"""register_all_wrappers"""
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f"registering Cal {C.calendar_provider}-{C.calenar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
# This provider is unnecessary in client provider
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f"registering ExpressioneD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f"registering DataseteD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
|
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
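# --- Illustrative sketch (not part of the original qlib source) ---
# A minimal, self-contained demonstration of the clamping behaviour documented
# above: boundaries that fall outside the trading calendar are snapped to the
# nearest trading day with `bisect`. The toy calendar and dates below are
# hypothetical and used only for illustration.
def _locate_index_sketch():
    import bisect
    import pandas as pd
    calendar = [pd.Timestamp(d) for d in ["2020-01-02", "2020-01-03", "2020-01-06"]]
    calendar_index = {x: i for i, x in enumerate(calendar)}
    start_time, end_time = pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-05")
    if start_time not in calendar_index:
        start_time = calendar[bisect.bisect_left(calendar, start_time)]
    if end_time not in calendar_index:
        end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
    # -> (Timestamp('2020-01-02'), Timestamp('2020-01-03'), 0, 1)
    return start_time, end_time, calendar_index[start_time], calendar_index[end_time]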
| 59 | 98 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
Wrong format of input instruments will lead to exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
# NOTE: requires Python >= 3.6; from Python 3.6 on, dict preserves insertion order
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
# NOTE: This is for Windows compatibility; Windows multiprocessing uses `spawn`, so wrappers must be re-registered in each worker process
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
# The client does not have expression provider, the data will be loaded from cache using static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(int)]  # np.int is deprecated/removed in recent NumPy
data.index.names = ["datetime"]
if spans is None:
return data
else:
mask = np.zeros(len(data), dtype=bool)  # np.bool is deprecated/removed in recent NumPy
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
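# --- Illustrative sketch (not part of the original qlib source) ---
# Demonstrates the span-masking step at the end of `expression_calculator` on a
# toy frame: only rows whose datetime falls inside one of the (begin, end)
# spans survive. The dates and column name are hypothetical.
def _spans_mask_sketch():
    idx = pd.date_range("2020-01-01", periods=5, freq="D")
    data = pd.DataFrame({"$close": range(5)}, index=idx)
    spans = [(pd.Timestamp("2020-01-02"), pd.Timestamp("2020-01-03"))]
    mask = np.zeros(len(data), dtype=bool)
    for begin, end in spans:
        mask |= (data.index >= begin) & (data.index <= end)
    return data[mask]  # keeps the rows for 2020-01-02 and 2020-01-03 only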
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
# if the future calendar does not exist, fall back to the current calendar
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt does not exist, returning the current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
"""Local instrument data provider class
Provide instrument data from local data source.
"""
def __init__(self):
pass
@property
def _uri_inst(self):
"""Instrument file uri."""
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
raise ValueError("instruments not exists for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
# `day`
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
# `1min`
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
# strip
# use calendar boundary
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
# filter
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
# as list
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
class LocalFeatureProvider(FeatureProvider):
"""Local feature data provider class
Provide feature data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
"""Static feature file uri."""
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
# validate
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
# raise ValueError('uri_data not found: ' + uri_data)
# load
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
"""Local expression data provider class
Provide expression data from local data source.
"""
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
# Ensure that each column type is consistent
# FIXME: The stock data is currently float. If there is other types of data, this part needs to be re-implemented.
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
class LocalDatasetProvider(DatasetProvider):
"""Local dataset data provider class
Provide dataset data from local data source.
"""
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
"""Client calendar data provider class
Provide calendar data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
"""Client instrument data provider class
Provide instrument data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
"""Client dataset data provider class
Provide dataset data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
"""
Call the server to generate the expression cache.
Then load the data from the expression cache directly.
- default using multi-kernel method.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
# - Done in callback
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
# pre-mounted NFS, used for demo
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
"""Local provider class
To keep compatibility with the old qlib provider.
"""
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
get_module_logger("Provider").warning(
"The instruments corresponds to a stock pool. "
"Parameters `start_time` and `end_time` does not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
"""
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use the cache method, which has a keyword `disk_cache`,
and will fall back to the provider method if a TypeError is raised because the
DatasetD instance is a provider class.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
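# --- Illustrative sketch (not part of the original qlib source) ---
# Mirrors the try/except fallback in `BaseProvider.features`: a cache-backed
# provider accepts the extra `disk_cache` argument, a plain provider raises
# TypeError and is then called without it. The callables below are dummies.
def _disk_cache_fallback_sketch():
    def call_provider(provider_func, *args, disk_cache=1):
        try:
            return provider_func(*args, disk_cache)
        except TypeError:
            return provider_func(*args)
    def plain_provider(instruments, fields):
        return instruments, fields
    return call_provider(plain_provider, "csi300", ["$close"])  # falls back to the two-argument call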
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
"""_uri
The server hopes to get the uri of the request. The uri will be decided
by the data provider; for example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
"""features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
"""
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
"""Client Provider
Requesting data from server as a client. Can propose requests:
- Calendar : Directly respond a list of calendars
- Instruments (without filter): Directly respond a list/dict of instruments
- Instruments (with filters): Respond a list/dict of instruments
- Features : Respond a cache uri
The general workflow is described as follows:
When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will then wait for the response. The response is made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
`BUG` : Every time we make a request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for the documentation of the python-socketio client.
"""
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
"""Data Provider Wrapper"""
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
raise AttributeError("Please run qlib.init() first using qlib")
return getattr(self._provider, key)
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
"""register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
"""
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
def register_all_wrappers():
"""register_all_wrappers"""
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f"registering Cal {C.calendar_provider}-{C.calenar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
# This provider is unnecessary in client provider
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f"registering ExpressioneD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f"registering DataseteD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
|
instruments
|
Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
# MASKED: instruments function (lines 138-174)
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
Wrong format of input instruments will lead to exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
# NOTE: requires Python >= 3.6; from Python 3.6 on, dict preserves insertion order
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
# NOTE: This is for Windows compatibility; Windows multiprocessing uses `spawn`, so wrappers must be re-registered in each worker process
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
# The client does not have expression provider, the data will be loaded from cache using static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(int)]  # np.int is deprecated/removed in recent NumPy
data.index.names = ["datetime"]
if spans is None:
return data
else:
mask = np.zeros(len(data), dtype=bool)  # np.bool is deprecated/removed in recent NumPy
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
# if the future calendar does not exist, fall back to the current calendar
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt does not exist, returning the current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
"""Local instrument data provider class
Provide instrument data from local data source.
"""
def __init__(self):
pass
@property
def _uri_inst(self):
"""Instrument file uri."""
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
raise ValueError("instruments not exists for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
# `day`
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
# `1min`
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
# strip
# use calendar boundary
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
# filter
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
# as list
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
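# --- Illustrative sketch (not part of the original qlib source) ---
# Reproduces the span-intersection filter used in `list_instruments` above:
# each listing span is clipped to the query window and instruments that fall
# entirely outside of it are dropped. Codes and dates are hypothetical.
def _span_intersection_sketch():
    start_time, end_time = pd.Timestamp("2020-01-01"), pd.Timestamp("2020-12-31")
    spans = {
        "SH600000": [(pd.Timestamp("2019-01-01"), pd.Timestamp("2020-06-30"))],
        "SH600001": [(pd.Timestamp("2021-01-01"), pd.Timestamp("2021-06-30"))],
    }
    clipped = {
        inst: [(max(start_time, b), min(end_time, e)) for b, e in s if max(start_time, b) <= min(end_time, e)]
        for inst, s in spans.items()
    }
    return {inst: s for inst, s in clipped.items() if s}  # only SH600000 survives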
class LocalFeatureProvider(FeatureProvider):
"""Local feature data provider class
Provide feature data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
"""Static feature file uri."""
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
# validate
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
# raise ValueError('uri_data not found: ' + uri_data)
# load
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
"""Local expression data provider class
Provide expression data from local data source.
"""
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
# Ensure that each column type is consistent
# FIXME: The stock data is currently float. If there is other types of data, this part needs to be re-implemented.
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
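# --- Illustrative sketch (not part of the original qlib source) ---
# Illustrates the extended-window loading in `expression` above: rolling
# expressions need extra history on the left, so the load range is widened by
# the extended window size and the final label-based slice trims it back.
# All index values below are hypothetical.
def _extended_window_sketch():
    start_index, end_index = 10, 20   # requested calendar index range
    lft_etd, rght_etd = 5, 0          # e.g. a 6-day moving average needs 5 extra rows on the left
    load_start = max(0, start_index - lft_etd)
    load_end = end_index + rght_etd
    full = pd.Series(range(load_start, load_end + 1), index=range(load_start, load_end + 1))
    return full.loc[start_index:end_index]  # trimmed back to indices 10..20 inclusive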
class LocalDatasetProvider(DatasetProvider):
"""Local dataset data provider class
Provide dataset data from local data source.
"""
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
"""Client calendar data provider class
Provide calendar data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
"""Client instrument data provider class
Provide instrument data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
"""Client dataset data provider class
Provide dataset data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
"""
Call the server to generate the expression cache.
Then load the data from the expression cache directly.
- default using multi-kernel method.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
# - Done in callback
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
# pre-mounted NFS, used for demo
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
"""Local provider class
    To keep compatibility with the old qlib provider.
"""
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
get_module_logger("Provider").warning(
"The instruments corresponds to a stock pool. "
"Parameters `start_time` and `end_time` does not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
"""
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
"""_uri
        The server hopes to get the uri of the request. The uri will be decided
        by the data provider. For example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
"""features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
"""
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
"""Client Provider
Requesting data from server as a client. Can propose requests:
- Calendar : Directly respond a list of calendars
- Instruments (without filter): Directly respond a list/dict of instruments
- Instruments (with filters): Respond a list/dict of instruments
- Features : Respond a cache uri
The general workflow is described as follows:
    When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will then wait for the response. The response is made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
    `BUG` : Every time we make a request for certain data, we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of the python-socketIO client.
"""
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
"""Data Provider Wrapper"""
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
raise AttributeError("Please run qlib.init() first using qlib")
return getattr(self._provider, key)
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
"""register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
"""
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
def register_all_wrappers():
"""register_all_wrappers"""
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f"registering Cal {C.calendar_provider}-{C.calenar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
# This provider is unnecessary in client provider
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f"registering ExpressioneD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f"registering DataseteD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
|
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
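A minimal usage sketch of the config produced above, assuming qlib has already been installed and can be initialized against a local data directory (the `provider_uri` path, the market name and the date range below are placeholders) and that the dynamic filters live in the usual `qlib.data.filter` module:
import qlib
from qlib.data import D
from qlib.data.filter import NameDFilter

qlib.init(provider_uri="~/.qlib/qlib_data/cn_data")  # placeholder data path

# Build a stockpool config: a base market plus one dynamic name filter.
name_filter = NameDFilter(name_rule_re="SH[0-9]{4}55")
pool_config = D.instruments(market="csi300", filter_pipe=[name_filter])

# Resolve the config into a concrete instrument list for a time range.
stocks = D.list_instruments(
    instruments=pool_config,
    start_time="2019-01-01",
    end_time="2019-12-31",
    as_list=True,
)
print(len(stocks), "instruments in the filtered pool")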
| 138 | 174 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
        A wrong format of input instruments will lead to an exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
                # NOTE: requires Python >= 3.6; since Python 3.6, dict preserves insertion order
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
        # NOTE: This is needed for Windows compatibility; Windows multiprocessing uses the spawn start method
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
            # The client does not have an expression provider; the data will be loaded from the cache using the static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
        data.index = _calendar[data.index.values.astype(int)]  # plain int: np.int was removed in newer NumPy
data.index.names = ["datetime"]
if spans is None:
return data
else:
            mask = np.zeros(len(data), dtype=bool)  # plain bool: np.bool was removed in newer NumPy
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
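# Illustrative note on the frame returned by dataset_processor above: one column per
# requested field and a two-level (instrument, datetime) index, schematically
#                               $close   Ref($close, 1)
#   instrument datetime
#   SH600000   2019-01-02         ...        ...
#              2019-01-03         ...        ...
# Instruments whose expressions yield no rows are dropped before concatenation.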
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
            # if the future calendar does not exist, fall back to the current calendar
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt not exists, return current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
"""Local instrument data provider class
Provide instrument data from local data source.
"""
def __init__(self):
pass
@property
def _uri_inst(self):
"""Instrument file uri."""
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
raise ValueError("instruments not exists for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
# `day`
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
# `1min`
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
# strip
# use calendar boundary
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
# filter
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
# as list
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
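# Illustrative note on the instrument files parsed by _load_instruments above (the layout is
# inferred from the 3-token / 5-token branches; the concrete values are placeholders):
#   day-level line :  SH600000 2008-01-02 2020-09-25
#   1min-level line:  SH600000 2008-01-02 09:31:00 2020-09-25 15:00:00
# An instrument may appear on several lines, one per listing span; every span is later
# clipped to the requested calendar range in list_instruments.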
class LocalFeatureProvider(FeatureProvider):
"""Local feature data provider class
Provide feature data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
"""Static feature file uri."""
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
# validate
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
# raise ValueError('uri_data not found: ' + uri_data)
# load
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
"""Local expression data provider class
Provide expression data from local data source.
"""
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
# Ensure that each column type is consistent
        # FIXME: The stock data is currently float. If there are other types of data, this part needs to be re-implemented.
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
class LocalDatasetProvider(DatasetProvider):
"""Local dataset data provider class
Provide dataset data from local data source.
"""
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
        If the expressions of one instrument haven't been calculated before,
        calculate them and write them into the expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
"""Client calendar data provider class
Provide calendar data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
"""Client instrument data provider class
Provide instrument data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
"""Client dataset data provider class
Provide dataset data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
"""
Call the server to generate the expression cache.
Then load the data from the expression cache directly.
- default using multi-kernel method.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
# - Done in callback
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
                # pre-mounted NFS, used for demo
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
"""Local provider class
    To keep compatibility with the old qlib provider.
"""
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
get_module_logger("Provider").warning(
"The instruments corresponds to a stock pool. "
"Parameters `start_time` and `end_time` does not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
"""
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
"""_uri
        The server hopes to get the uri of the request. The uri will be decided
        by the data provider. For example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
"""features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
"""
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
"""Client Provider
Requesting data from server as a client. Can propose requests:
- Calendar : Directly respond a list of calendars
- Instruments (without filter): Directly respond a list/dict of instruments
- Instruments (with filters): Respond a list/dict of instruments
- Features : Respond a cache uri
The general workflow is described as follows:
    When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will then wait for the response. The response is made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
    `BUG` : Every time we make a request for certain data, we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of the python-socketIO client.
"""
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
"""Data Provider Wrapper"""
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
raise AttributeError("Please run qlib.init() first using qlib")
return getattr(self._provider, key)
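# Usage sketch (illustrative, not part of the original module): the module-level wrappers
# defined below (Cal, Inst, FeatureD, ExpressionD, DatasetD, D) stay empty until
# register_all_wrappers() runs during qlib.init(); afterwards attribute access is forwarded
# to the registered provider, e.g.
#   import qlib
#   from qlib.data import D
#   qlib.init(provider_uri="~/.qlib/qlib_data/cn_data")   # placeholder data path
#   df = D.features(["SH600000"], ["$close", "Ref($close, 1)"],
#                   start_time="2019-01-01", end_time="2019-03-01")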
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
"""register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
"""
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
def register_all_wrappers():
"""register_all_wrappers"""
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f"registering Cal {C.calendar_provider}-{C.calenar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
# This provider is unnecessary in client provider
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f"registering ExpressioneD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f"registering DataseteD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
|
submit_observation
|
Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
|
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
"""Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
"""
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
"""Initialize parameters of the experiment client object."""
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
# MASKED: submit_observation function (lines 47-81)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
"""Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
        that serves the objective of maximizing the metric of interest.
Parameters:
            rand_prob (optional, float): This parameter is the probability that a
                random point in the input space is chosen instead of selecting a
                configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
def best_configuration(self):
"""Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
def pending_recommendations(self):
"""Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
of "limbo" state in which is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
"""Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
"""
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
|
def submit_observation(self, config, target):
"""Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
| 47 | 81 |
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
"""Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
"""
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
"""Initialize parameters of the experiment client object."""
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
def submit_observation(self, config, target):
"""Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
"""Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
        that serves the objective of maximizing the metric of interest.
Parameters:
            rand_prob (optional, float): This parameter is the probability that a
                random point in the input space is chosen instead of selecting a
                configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
def best_configuration(self):
"""Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
def pending_recommendations(self):
"""Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
of "limbo" state in which is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
"""Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
"""
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
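A hedged end-to-end sketch of how this client is typically driven: a simple suggest-evaluate-report loop built only on the methods defined above. It assumes `exp` is an already constructed ExperimentClient for an experiment with dimensions named "x" and "y" (for example obtained from the account-level Thor client, which is not part of this module), that the toy objective `f` stands in for the real workload, and that the returned recommendation exposes the suggested parameters through a `config` attribute; those last two names are assumptions, not guarantees made by this file.
def f(x, y):
    # Placeholder objective; in practice this would train and score a model.
    return -((x - 1.0) ** 2 + (y - 2.0) ** 2)

for _ in range(10):
    rec = exp.create_recommendation(rand_prob=0.1, n_models=5)
    params = rec.config  # assumed attribute on RecommendationClient
    exp.submit_observation(params, f(params["x"], params["y"]))

print(exp.best_configuration())  # best metric value observed so far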
|
best_configuration
|
Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
|
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
"""Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
"""
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
"""Initialize parameters of the experiment client object."""
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
def submit_observation(self, config, target):
"""Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
"""Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
        that serves the objective of maximizing the metric of interest.
Parameters:
            rand_prob (optional, float): This parameter is the probability that a
                random point in the input space is chosen instead of selecting a
                configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
# MASKED: best_configuration function (lines 145-163)
def pending_recommendations(self):
"""Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
        of "limbo" state in which it is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
"""Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
"""
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
|
def best_configuration(self):
"""Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
| 145 | 163 |
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
"""Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
"""
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
"""Initialize parameters of the experiment client object."""
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
def submit_observation(self, config, target):
"""Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
"""Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
        that serves the objective of maximizing the metric of interest.
Parameters:
            rand_prob (optional, float): The probability that a random point
                in the input space is chosen instead of a configuration
                selected by Bayesian optimization. As such, this parameter can
                be used to benchmark against random search and otherwise to
                perform pure exploration of the parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
def best_configuration(self):
"""Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
def pending_recommendations(self):
"""Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
        of "limbo" state in which it is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
"""Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
"""
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
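# Illustrative usage only (not part of the original module): a minimal sketch of
# one Bayesian-optimization loop driven by ExperimentClient. It assumes an
# experiment already exists on the Thor server and that the returned
# RecommendationClient exposes the suggested configuration as a `config`
# attribute -- that attribute name is an assumption, not something defined in
# this file. `objective` stands in for the user's own evaluation code.
def run_optimization_loop(exp, objective, n_iters=20):
    """Ask for a configuration, evaluate it, report it back, repeat."""
    for _ in range(n_iters):
        rec = exp.create_recommendation(rand_prob=0.1)
        config = rec.config            # assumed attribute on RecommendationClient
        value = objective(config)      # e.g. validation accuracy of a model
        exp.submit_observation(config, value)
    # Finally ask the server which configuration performed best overall.
    return exp.best_configuration()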
|
write_frames
|
Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written
|
# Copyright (C) 2019 by Landmark Acoustics LLC
r"""A class to write a WAV-formatted file."""
import wave
class WaveFile:
    '''A wrapper for `Wave_write` from the Python standard library's `wave` module.
Parameters
----------
name : str
The name to save the file as. It should include path and extension.
sample_rate : int
The number of samples per second that the file will use.
bit_rate : int
The number of bits the file will use per sample.
channels : int
The number of channels that the file has.
See Also
--------
    wave : the Python standard library module
'''
def __init__(self,
name: str,
sample_rate: int,
bit_rate: int,
channels: int) -> None:
self._channels = channels
self._sample_rate = sample_rate
self._byte_rate = bit_rate // 8
self._filehandle = wave.open(name, 'wb')
self._filehandle.setnchannels(self.channels)
self._filehandle.setsampwidth(self.byte_rate)
self._filehandle.setframerate(self.sample_rate)
@property
def channels(self) -> int:
'''The number of channels the file has.'''
return self._channels
@property
def sample_rate(self) -> int:
'''The number of samples per second.'''
return self._sample_rate
@property
def byte_rate(self) -> int:
'''The number of bytes per sample.'''
return self._byte_rate
@property
def bit_rate(self) -> int:
'''The number of bits per sample.'''
return self.byte_rate * 8
# MASKED: write_frames function (lines 63-79)
@property
def frame_size(self) -> int:
'''The number of bytes per frame.'''
return self.byte_rate * self.channels
def __enter__(self):
self._filehandle.__enter__()
return self
def __exit__(self, *args, **kwargs):
return self._filehandle.__exit__(*args, **kwargs)
if __name__ == '__main__':
import array
import sys
wvf = WaveFile(sys.argv[1], 44100, 28, 3)
a = array.array('b')
a.extend([0 for i in range(12000 * wvf.frame_size)])
N = wvf.write_frames(a)
print(f'Wrote {N} frames in {wvf.channels} {wvf.bit_rate}-bit channels.')
|
def write_frames(self, data) -> int:
'''Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written
'''
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return self._filehandle.tell() - pos
| 63 | 79 |
# Copyright (C) 2019 by Landmark Acoustics LLC
r"""A class to write a WAV-formatted file."""
import wave
class WaveFile:
    '''A wrapper for `Wave_write` from the Python standard library's `wave` module.
Parameters
----------
name : str
The name to save the file as. It should include path and extension.
sample_rate : int
The number of samples per second that the file will use.
bit_rate : int
The number of bits the file will use per sample.
channels : int
The number of channels that the file has.
See Also
--------
    wave : the Python standard library module
'''
def __init__(self,
name: str,
sample_rate: int,
bit_rate: int,
channels: int) -> None:
self._channels = channels
self._sample_rate = sample_rate
self._byte_rate = bit_rate // 8
self._filehandle = wave.open(name, 'wb')
self._filehandle.setnchannels(self.channels)
self._filehandle.setsampwidth(self.byte_rate)
self._filehandle.setframerate(self.sample_rate)
@property
def channels(self) -> int:
'''The number of channels the file has.'''
return self._channels
@property
def sample_rate(self) -> int:
'''The number of samples per second.'''
return self._sample_rate
@property
def byte_rate(self) -> int:
'''The number of bytes per sample.'''
return self._byte_rate
@property
def bit_rate(self) -> int:
'''The number of bits per sample.'''
return self.byte_rate * 8
def write_frames(self, data) -> int:
'''Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written
'''
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return self._filehandle.tell() - pos
@property
def frame_size(self) -> int:
'''The number of bytes per frame.'''
return self.byte_rate * self.channels
def __enter__(self):
self._filehandle.__enter__()
return self
def __exit__(self, *args, **kwargs):
return self._filehandle.__exit__(*args, **kwargs)
if __name__ == '__main__':
import array
import sys
wvf = WaveFile(sys.argv[1], 44100, 28, 3)
a = array.array('b')
a.extend([0 for i in range(12000 * wvf.frame_size)])
N = wvf.write_frames(a)
print(f'Wrote {N} frames in {wvf.channels} {wvf.bit_rate}-bit channels.')
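# Illustrative usage only (not part of the original file): a sketch that writes
# one second of a 440 Hz sine tone as 16-bit mono PCM. The file name and tone
# parameters are arbitrary; the point is that the bytes handed to write_frames()
# must already match the sample width and channel count declared up front.
import math
import struct

def write_sine_tone(path='tone.wav', freq=440.0, rate=44100, seconds=1.0):
    amplitude = 32000  # just under the 16-bit limit of 32767
    n_samples = int(rate * seconds)
    samples = [int(amplitude * math.sin(2 * math.pi * freq * i / rate))
               for i in range(n_samples)]
    payload = struct.pack('<%dh' % n_samples, *samples)  # little-endian int16
    with WaveFile(path, sample_rate=rate, bit_rate=16, channels=1) as wav:
        return wav.write_frames(payload)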
|
tokenize
|
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
|
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
def email_mapper(df):
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[user_item.isna() == False] = 1
return user_item # return the user_item matrix
def get_top_articles(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids # Return the top article ids
def user_user_recs(user_id, user_item, df, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
    * Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
def get_user_articles_names_ids(user_id):
'''
INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[df['article_id'] == i]['title'].unique()[0]
except IndexError:
title ="None"
article_names.append(title)
article_ids = list(map(str, article_ids))
return article_ids, article_names # return the ids and names
def find_similar_users():
'''
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
        Computes the similarity of every user to the provided user based on the dot product
        Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # 2. Select a row
result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids
return most_similar_users # return a list of the users in order from most to least similar
def get_top_sorted_users(most_similar_users):
'''
INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user - if a u
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Make neighbor_id column
df_user_id_grouped =df.groupby("user_id")
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
# make similarity column
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # Select a row
result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
# Make num_interactions column
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df # Return the dataframe specified in the doc_string
recs = []
rec_names =[]
counter = 0
# Get seen article ids and names from selected user id
article_ids, article_names = get_user_articles_names_ids(user_id)
# Make set to find unseen articles
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
# Find similar users of the selected user
similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df
# Make recommendation list
for sim_user in similar_users_list:
if counter < m:
# Get seen article ids and names from similar users
sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)
# Make dict (key: article_ids, value:article_names)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
# Make set to find unseen articles
sim_seen_ids_set = set(sim_article_ids)
# Create set of unseen articles_ids
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if counter < m:
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return recs, rec_names
###
def make_Tfidf_array(df_content):
# MASKED: tokenize function (lines 228-252)
corpus = df_content['doc_description']
df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# Text Processing, Feature Extraction
vect = TfidfVectorizer(tokenizer=tokenize)
# get counts of each token (word) in text data
X = vect.fit_transform(corpus)
X = X.toarray()
return vect, X
def make_content_recs(article_id, df_content, df, m=10):
'''
INPUT:
    article_id - (int) an article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
'''
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
vect, X = make_Tfidf_array(df_content)
if article_id in df_content.article_id:
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
# Get title of the document of interest
booktitle = df[df['article_id'] == article_id]['title'].values[0]
# Tokenize the title
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if i in tfidf_feature_name:
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:,X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values("sum", ascending=False)[0:10].index.tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
return recs, rec_names
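# Illustrative example only (not part of the original file): how a custom
# tokenizer plugs into TfidfVectorizer, mirroring what make_Tfidf_array() does
# with the masked tokenize() helper. The three documents are made up, and the
# whitespace tokenizer here is deliberately simpler than the NLTK-based one.
def _demo_tfidf():
    docs = ["predict churn with gradient boosting",
            "gradient descent from scratch",
            "visualize churn data in pandas"]
    def simple_tokenize(text):
        return text.lower().split()
    demo_vect = TfidfVectorizer(tokenizer=simple_tokenize)
    demo_X = demo_vect.fit_transform(docs).toarray()
    # One row per document, one column per vocabulary term.
    return demo_vect, demo_X  # demo_X.shape == (3, number_of_terms)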
|
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
| 228 | 252 |
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
def email_mapper(df):
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[user_item.isna() == False] = 1
return user_item # return the user_item matrix
def get_top_articles(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids # Return the top article ids
def user_user_recs(user_id, user_item, df, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
    * Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
def get_user_articles_names_ids(user_id):
'''
INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[df['article_id'] == i]['title'].unique()[0]
except IndexError:
title ="None"
article_names.append(title)
article_ids = list(map(str, article_ids))
return article_ids, article_names # return the ids and names
def find_similar_users():
'''
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
        Computes the similarity of every user to the provided user based on the dot product
        Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # 2. Select a row
result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids
return most_similar_users # return a list of the users in order from most to least similar
def get_top_sorted_users(most_similar_users):
'''
INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user - if a u
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Make neighbor_id column
df_user_id_grouped =df.groupby("user_id")
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
# make similarity column
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # Select a row
result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
# Make num_interactions column
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df # Return the dataframe specified in the doc_string
recs = []
rec_names =[]
counter = 0
# Get seen article ids and names from selected user id
article_ids, article_names = get_user_articles_names_ids(user_id)
# Make set to find unseen articles
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
# Find similar users of the selected user
similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df
# Make recommendation list
for sim_user in similar_users_list:
if counter < m:
# Get seen article ids and names from similar users
sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)
# Make dict (key: article_ids, value:article_names)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
# Make set to find unseen articles
sim_seen_ids_set = set(sim_article_ids)
# Create set of unseen articles_ids
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if counter < m:
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return recs, rec_names
###
def make_Tfidf_array(df_content):
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
corpus = df_content['doc_description']
df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# Text Processing, Feature Extraction
vect = TfidfVectorizer(tokenizer=tokenize)
# get counts of each token (word) in text data
X = vect.fit_transform(corpus)
X = X.toarray()
return vect, X
def make_content_recs(article_id, df_content, df, m=10):
'''
INPUT:
    article_id - (int) an article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
'''
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
vect, X = make_Tfidf_array(df_content)
if article_id in df_content.article_id:
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
# Get title of the document of interest
booktitle = df[df['article_id'] == article_id]['title'].values[0]
# Tokenize the title
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if i in tfidf_feature_name:
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:,X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values("sum", ascending=False)[0:10].index.tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
return recs, rec_names
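# Illustrative example only (not part of the original file): a tiny interaction
# table showing what create_user_item_matrix() and get_top_articles() return.
# The column names match what the functions above expect; the values are made up.
def _demo_user_item_matrix():
    demo_df = pd.DataFrame({
        "user_id": [1, 1, 2, 3, 3, 3],
        "article_id": [10.0, 11.0, 10.0, 10.0, 11.0, 12.0],
        "title": ["intro to ml", "pandas tips", "intro to ml",
                  "intro to ml", "pandas tips", "using spark"],
    })
    demo_user_item = create_user_item_matrix(demo_df)  # 3 users x 3 articles, 1 or NaN
    top_two = get_top_articles(2, demo_df)             # -> ['intro to ml', 'pandas tips']
    return demo_user_item, top_two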
|
tokenize
|
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
|
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
def email_mapper(df):
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[user_item.isna() == False] = 1
return user_item # return the user_item matrix
def get_top_articles(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids # Return the top article ids
def user_user_recs(user_id, user_item, df, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
    * Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
def get_user_articles_names_ids(user_id):
'''
INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[df['article_id'] == i]['title'].unique()[0]
except IndexError:
title ="None"
article_names.append(title)
article_ids = list(map(str, article_ids))
return article_ids, article_names # return the ids and names
def find_similar_users():
'''
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
        Computes the similarity of every user to the provided user based on the dot product
        Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # 2. Select a row
result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids
return most_similar_users # return a list of the users in order from most to least similar
def get_top_sorted_users(most_similar_users):
'''
INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user - if a u
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Make neighbor_id column
df_user_id_grouped =df.groupby("user_id")
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
# make similarity column
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # Select a row
result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
# Make num_interactions column
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df # Return the dataframe specified in the doc_string
recs = []
rec_names =[]
counter = 0
# Get seen article ids and names from selected user id
article_ids, article_names = get_user_articles_names_ids(user_id)
# Make set to find unseen articles
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
# Find similar users of the selected user
similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df
# Make recommendation list
for sim_user in similar_users_list:
if counter < m:
# Get seen article ids and names from similar users
sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)
# Make dict (key: article_ids, value:article_names)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
# Make set to find unseen articles
sim_seen_ids_set = set(sim_article_ids)
# Create set of unseen articles_ids
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if counter < m:
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return recs, rec_names
###
def make_Tfidf_array(df_content):
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
corpus = df_content['doc_description']
df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# Text Processing, Feature Extraction
vect = TfidfVectorizer(tokenizer=tokenize)
# get counts of each token (word) in text data
X = vect.fit_transform(corpus)
X = X.toarray()
return vect, X
def make_content_recs(article_id, df_content, df, m=10):
'''
INPUT:
    article_id - (int) an article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
'''
# MASKED: tokenize function (lines 279-303)
vect, X = make_Tfidf_array(df_content)
if article_id in df_content.article_id:
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
# Get title of the document of interest
booktitle = df[df['article_id'] == article_id]['title'].values[0]
# Tokenize the title
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if i in tfidf_feature_name:
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:,X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values("sum", ascending=False)[0:10].index.tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
return recs, rec_names
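# Illustrative example only (not part of the original file): the dot-product
# similarity idea used inside find_similar_users(), shown on a hard-coded 0/1
# user-item matrix so the arithmetic is easy to follow. Row labels are user ids.
def _demo_user_similarity():
    demo_matrix = pd.DataFrame(
        [[1, 1, 0],
         [1, 0, 0],
         [1, 1, 1]],
        index=[1, 2, 3],                      # user ids
        columns=[10.0, 11.0, 12.0],           # article ids
    )
    row = demo_matrix.loc[1]                  # the user we want neighbours for
    scores = row @ demo_matrix.T              # overlap counts with every user
    scores = scores.drop(labels=[1])          # remove the user themselves
    return scores.sort_values(ascending=False).index.tolist()  # -> [3, 2]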
|
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
| 279 | 303 |
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
def email_mapper(df):
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item = df.groupby('user_id')['article_id'].value_counts().unstack()
user_item[user_item.isna() == False] = 1
return user_item # return the user_item matrix
def get_top_articles(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['title'])
top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_id_grouped_df = df.groupby(['article_id'])
top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()
return top_articles_ids # Return the top article ids
def user_user_recs(user_id, user_item, df, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
    * Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
def get_user_articles_names_ids(user_id):
'''
INPUT:
user_id
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()
article_names = []
for i in article_ids:
try:
title = df[df['article_id'] == i]['title'].unique()[0]
except IndexError:
title ="None"
article_names.append(title)
article_ids = list(map(str, article_ids))
return article_ids, article_names # return the ids and names
def find_similar_users():
'''
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
        Computes the similarity of every user to the provided user based on the dot product
        Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # 2. Select a row
result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids
return most_similar_users # return a list of the users in order from most to least similar
def get_top_sorted_users(most_similar_users):
'''
INPUT:
most_similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user - if a u
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Make neighbor_id column
df_user_id_grouped =df.groupby("user_id")
df_user_id_grouped['article_id'].count().sort_values(ascending=False)
neighbors_df = pd.DataFrame()
neighbors_df['neighbor_id'] = most_similar_users
# make similarity column
user_item_tmp = user_item.copy()
user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0
row = user_item_tmp.loc[user_id] # Select a row
result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix
result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id
similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10]
neighbors_df['similarity'] = similarity
# Make num_interactions column
num_interactions = []
for i in neighbors_df['neighbor_id']:
counted_interaction = df_user_id_grouped['article_id'].count().loc[i]
num_interactions.append(counted_interaction)
neighbors_df['num_interactions'] = num_interactions
neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)
return neighbors_df # Return the dataframe specified in the doc_string
recs = []
rec_names =[]
counter = 0
# Get seen article ids and names from selected user id
article_ids, article_names = get_user_articles_names_ids(user_id)
# Make set to find unseen articles
seen_ids_set = set(article_ids)
most_similar_users = find_similar_users()[0:10]
neighbors_df = get_top_sorted_users(most_similar_users)
# Find similar users of the selected user
similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df
# Make recommendation list
for sim_user in similar_users_list:
if counter < m:
# Get seen article ids and names from similar users
sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)
# Make dict (key: article_ids, value:article_names)
sim_user_dict = dict(zip(sim_article_ids, sim_article_names))
# Make set to find unseen articles
sim_seen_ids_set = set(sim_article_ids)
# Create set of unseen articles_ids
unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)
for i in unseen_ids_set:
if counter < m:
recs.append(i)
rec_names.append(sim_user_dict[i])
counter += 1
return recs, rec_names
###
def make_Tfidf_array(df_content):
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
corpus = df_content['doc_description']
df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# Text Processing, Feature Extraction
vect = TfidfVectorizer(tokenizer=tokenize)
# get counts of each token (word) in text data
X = vect.fit_transform(corpus)
X = X.toarray()
return vect, X
def make_content_recs(article_id, df_content, df, m=10):
'''
INPUT:
    article_id - (int) an article id in df_content
m - (int) the number of recommendations you want for the user
df_content - (pandas dataframe) df_content as defined at the top of the notebook
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
'''
def tokenize(text):
'''
        Splits the text into separate words, lowercases each word, and strips whitespace from both ends of the word.
        The function also removes irrelevant stopwords.
Input:
1. text: text message
Output:
1. Clean_tokens : list of tokenized clean words
'''
        # Get rid of other special characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Tokenize
tokens = word_tokenize(text)
# Lemmatize
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()
clean_tokens.append(clean_tok)
# Remove stop words
stopwords = nltk.corpus.stopwords.words('english')
clean_tokens = [token for token in clean_tokens if token not in stopwords]
return clean_tokens
vect, X = make_Tfidf_array(df_content)
if article_id in df_content.article_id:
cosine_similarity = linear_kernel(X, X)
df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])
df_similarity_modified = df_similarity.drop(article_id)
recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
else:
tfidf_feature_name = vect.get_feature_names()
# Get title of the document of interest
booktitle = df[df['article_id'] == article_id]['title'].values[0]
# Tokenize the title
booktitle_tokenized = tokenize(booktitle)
X_slice_list = []
for i in booktitle_tokenized:
if i in tfidf_feature_name:
X_slice_list.append(tfidf_feature_name.index(i))
X_slice_list.sort()
X_sliced = X[:,X_slice_list]
check_df = pd.DataFrame(X_sliced, columns=X_slice_list)
check_df['sum'] = check_df.sum(axis=1)
recs = check_df.sort_values("sum", ascending=False)[0:m].index.tolist()
rec_names = []
for i in recs:
name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]
rec_names.append(name)
return recs, rec_names
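# Usage sketch (illustrative only; the article id below is hypothetical):
#
# recs, rec_names = make_content_recs(1427, df_content, df, m=10)
# for rec_id, rec_name in zip(recs, rec_names):
#     print(rec_id, rec_name)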
|
__init__
|
__init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
# MASKED: __init__ function (lines 168-230)
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # Running total of track units assigned so far
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
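# Worked example (illustrative, not from the original source): with two tracks of
# height 1 on a 600x600 pt drawable area, trackunit_sum = 2 and
# trackunit_height = 0.5*600/2 = 150. With track_size = 0.75 the crop is
# 150*0.25/2 = 18.75, giving track 1 radii (18.75, 75.0, 131.25) and
# track 2 radii (168.75, 225.0, 281.25).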
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# Radial position (distance from the diagram center) of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0,self.end,tickiterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
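# Illustrative check of the shortcut above (assumed values): with self.start=2500,
# self.end=9000 and a 1000 bp large-tick interval, range() starts at
# 1000*(2500//1000) = 2000 and the filter keeps [3000, 4000, ..., 8000], so the
# ticks stay aligned to position 0 of the sequence rather than to self.start.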
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
Draw blue circles indicating the tracks to be drawn, with a green
circle marking each track center.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
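# Illustrative check (assumed values): for a full-circle diagram (sweep=1.0) of a
# 1000 bp sequence starting at base 0, canvas_angle(250) returns approximately
# (pi/2, 0.0, 1.0), i.e. a quarter turn clockwise from the top of the circle in
# this drawer's clockwise-from-vertical convention.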
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
#TODO - two straight lines are only a good approximation for small
#head angles; in general curved lines will be needed here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
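# Usage sketch (illustrative): this drawer is normally created and driven by a
# GenomeDiagram Diagram object's draw() call; `gd_diagram` below is an assumed,
# already-populated Diagram instance.
#
# drawer = CircularDrawer(parent=gd_diagram, pagesize='A4', tracklines=0,
#                         track_size=0.75, circular=1)
# drawer.draw()               # populates drawer.drawing (a reportlab Drawing)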
| 168 | 230 |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # Running total of track units assigned so far
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# Radial position (distance from the diagram center) of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
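# --- Illustrative sketch (not part of the original module) ---------------
# Assumption about the imported intermediate_points() helper: each data point
# is widened into a block running from the midpoint with its left neighbour to
# the midpoint with its right neighbour, clamped to the drawn region, so the
# bars tile the sequence without gaps. A minimal stand-in version:
def _sketch_binned_blocks(start, end, data):
    """data: sorted list of (position, value); returns [(pos0, pos1, value), ...]."""
    blocks = []
    for i, (pos, val) in enumerate(data):
        left = start if i == 0 else (data[i - 1][0] + pos) // 2
        right = end if i == len(data) - 1 else (pos + data[i + 1][0]) // 2
        blocks.append((left, right, val))
    return blocks

print(_sketch_binned_blocks(0, 100, [(10, 1.0), (50, 2.0), (90, 0.5)]))
# [(0, 30, 1.0), (30, 70, 2.0), (70, 100, 0.5)]
# --------------------------------------------------------------------------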
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
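# --- Illustrative sketch (not part of the original module) ---------------
# The heat colour comes from ReportLab's linearlyInterpolatedColor, called
# above with the reference points maxval -> poscolor and minval -> negcolor.
# A value near the maximum therefore blends towards poscolor and a value near
# the minimum towards negcolor. Requires reportlab to be installed.
from reportlab.lib import colors

poscolor, negcolor = colors.red, colors.blue
minval, maxval = 0.0, 1.0
heat = colors.linearlyInterpolatedColor(poscolor, negcolor, maxval, minval, 0.75)
print(heat)   # 0.75 is nearer maxval, so the result is closer to poscolor (red)
# --------------------------------------------------------------------------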
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0,self.end,tickinterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
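# --- Illustrative sketch (not part of the original module) ---------------
# Tick positions above are anchored to position 0 of the sequence rather than
# to the current view: the first candidate tick is the last multiple of the
# interval at or before self.start, and anything left of the view is dropped.
# Hypothetical numbers:
def _sketch_tick_positions(start, end, interval):
    first = interval * (start // interval)
    return [pos for pos in range(first, int(end), interval) if pos >= start]

print(_sketch_tick_positions(1250, 5000, 1000))   # [2000, 3000, 4000]
# --------------------------------------------------------------------------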
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
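# --- Illustrative sketch (not part of the original module) ---------------
# The 'SInt' scale format used above shortens large positions to Mbp/Kbp
# strings and leaves small positions as plain base counts:
def _sketch_sint_label(tickpos):
    if tickpos >= 1000000:
        return str(tickpos // 1000000) + " Mbp"
    elif tickpos >= 1000:
        return str(tickpos // 1000) + " Kbp"
    return str(tickpos)

print([_sketch_sint_label(p) for p in (500, 25000, 3500000)])   # ['500', '25 Kbp', '3 Mbp']
# --------------------------------------------------------------------------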
def draw_test_tracks(self):
""" draw_test_tracks(self)
Draw blue lines indicating tracks to be drawn, with a green line
down the center.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
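# --- Illustrative sketch (not part of the original module) ---------------
# canvas_angle() measures the angle clockwise from 12 o'clock, so elsewhere in
# this class radius*sin(angle) gives the X offset and radius*cos(angle) the Y
# offset from the centre. Worked example with hypothetical numbers:
from math import pi, sin, cos

start, length, sweep = 0, 4000, 1.0
base = 1000                                        # a quarter of the way round
angle = sweep * 2 * pi * (base - start) / length
print(angle)         # 1.5707... (pi/2)
print(sin(angle))    # 1.0  -> X offset is +radius, the point sits right of centre
print(cos(angle))    # ~0.0 -> no Y offset
# --------------------------------------------------------------------------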
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
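# --- Illustrative sketch (not part of the original module) ---------------
# The addArc() calls above convert between two angle conventions: this module
# measures clockwise from the vertical in radians, ReportLab anti-clockwise
# from the horizontal in degrees, hence the 90 - angle*180/pi expressions:
from math import pi

def _sketch_to_reportlab_degrees(angle_radians):
    return 90 - (angle_radians * 180 / pi)

print(_sketch_to_reportlab_degrees(0))        # 90.0  (12 o'clock)
print(_sketch_to_reportlab_degrees(pi / 2))   # 0.0   (3 o'clock)
print(_sketch_to_reportlab_degrees(pi))       # -90.0 (6 o'clock)
# --------------------------------------------------------------------------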
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
#TODO - two straight lines are only a good approximation for small
#head angles; in general we will need curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
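# --- Illustrative sketch (not part of the original module) ---------------
# The arrow head size above is obtained by turning a fraction of the box
# height into an angle at the middle radius (arc length / radius), capped so
# the head can never be longer than the arc itself. Hypothetical numbers:
def _sketch_head_angle_delta(inner_radius, outer_radius, head_length_ratio, angle):
    middle_radius = 0.5 * (inner_radius + outer_radius)
    boxheight = outer_radius - inner_radius
    return max(0.0, min(abs(boxheight) * head_length_ratio / middle_radius, abs(angle)))

print(_sketch_head_angle_delta(140, 160, 0.5, 0.3))   # 20*0.5/150 ~ 0.0667 radians
# --------------------------------------------------------------------------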
|
draw_track
|
draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except Exception: # Track may not define a height
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
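# --- Illustrative sketch (not part of the original module) ---------------
# The radius allocation above: track heights are summed, the smaller page
# dimension fixes the pixels per height unit, and each track's band is then
# pulled in by (1 - track_size)/2 of a unit at both edges. A standalone
# version with hypothetical numbers:
def _sketch_track_radii(track_heights, pagewidth, pageheight, track_size=0.75):
    total = sum(track_heights)
    unit = 0.5 * min(pagewidth, pageheight) / total
    crop = unit * (1 - track_size) / 2.0
    radii, base = {}, 0
    for number, height in enumerate(track_heights, start=1):
        btm = base * unit + crop
        top = (base + height) * unit - crop
        radii[number] = (btm, btm + (top - btm) / 2.0, top)
        base += height
    return radii

print(_sketch_track_radii([1, 1, 2], 800, 600))
# --------------------------------------------------------------------------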
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
# MASKED: draw_track function (lines 318-337)
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
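# --- Illustrative sketch (not part of the original module) ---------------
# Strand handling above in get_feature_sigil: forward-strand features occupy
# the outer half of the track band, reverse-strand features the inner half,
# and strandless features the full band. Hypothetical radii:
def _sketch_strand_band(strand, btm, ctr, top):
    if strand == 1:
        return ctr, top    # outer half, arrow oriented 'right'
    elif strand == -1:
        return btm, ctr    # inner half, arrow oriented 'left'
    return btm, top        # full track height

print(_sketch_strand_band(1, 100, 125, 150))    # (125, 150)
print(_sketch_strand_band(-1, 100, 125, 150))   # (100, 125)
print(_sketch_strand_band(0, 100, 125, 150))    # (100, 150)
# --------------------------------------------------------------------------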
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0: # Avoid zero division when the data is flat
resolution = trackheight
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# We calculate the track height
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0,self.end,tickinterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
Draw blue lines indicating tracks to be drawn, with a green line
down the center.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
#TODO - two straight lines are only a good approximation for small
#head angles; in general we will need curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
| 318 | 337 |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, locstart, locend) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
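# Illustrative sketch (not from the original module): how the radii above
# work out for hypothetical values - a 600x600 pt drawable area, two
# tracks each of height 1, and track_size=0.75.
pagewidth = pageheight = 600.0
trackunit_sum = 2
trackunit_height = 0.5 * min(pagewidth, pageheight) / trackunit_sum # 150.0
track_crop = trackunit_height * (1 - 0.75) / 2. # 18.75 pt trimmed at each edge
btm1, top1 = 0 * trackunit_height + track_crop, 1 * trackunit_height - track_crop
btm2, top2 = 1 * trackunit_height + track_crop, 2 * trackunit_height - track_crop
assert (btm1, top1) == (18.75, 131.25)
assert (btm2, top2) == (168.75, 281.25)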
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
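# Illustrative sketch (not from the original module): the 6-tuple assigned
# to Group.transform above is assumed to follow reportlab's standard
# PDF-style (a, b, c, d, e, f) convention, mapping (x, y) to
# (a*x + c*y + e, b*x + d*y + f). With a = d = cos(A), b = -sin(A) and
# c = sin(A) this rotates the label clockwise by A and translates it to
# the anchor point (e, f) on the track radius.
from math import cos, sin, pi
A = 0.5 * pi
a, b, c, d = cos(A), -sin(A), sin(A), cos(A)
x, y = 1.0, 0.0 # a point on the horizontal axis
xr, yr = a * x + c * y, b * x + d * y # approximately (0.0, -1.0): rotated 90 degrees clockwise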
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# Calculate the radial position corresponding to the first data value
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
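# Illustrative sketch (not from the original module): how a data value is
# mapped onto a track radius above, with hypothetical numbers.
btm, top = 100.0, 200.0
trackheight = 0.5 * (top - btm) # 50.0
ctr = btm + (top - btm) / 2. # 150.0
minval, maxval = 0.0, 10.0
midval = (maxval + minval) / 2. # 5.0, plotted on the central ring
resolution = max(midval - minval, maxval - midval) # 5.0
posheight = lambda val: trackheight * (val - midval) / resolution + ctr
assert posheight(0.0) == btm and posheight(5.0) == ctr and posheight(10.0) == top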
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
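# Illustrative note (not from the original module): reportlab's
# colors.linearlyInterpolatedColor(c0, c1, x0, x1, x) returns c0 when
# x == x0 and c1 when x == x1, so in the call above a value equal to
# maxval is drawn in the graph's poscolor, a value equal to minval in its
# negcolor, and intermediate values as a proportional blend of the two.
from reportlab.lib import colors
mid = colors.linearlyInterpolatedColor(colors.red, colors.blue, 10.0, 0.0, 5.0)
# mid is a 50/50 red/blue blend, approximately Color(0.5, 0.0, 0.5)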
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0,self.end,tickiterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start//tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
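# Illustrative sketch (not from the original module): with hypothetical
# values start=2500, end=9000 and a large tick interval of 1000, the list
# comprehensions above place ticks at absolute multiples of the interval,
# independent of the current viewpoint.
start, end, tickiterval = 2500, 9000, 1000
ticks = [pos for pos in range(tickiterval * (start // tickiterval), end, tickiterval) if pos >= start]
assert ticks == [3000, 4000, 5000, 6000, 7000, 8000]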
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
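# Illustrative sketch (not from the original module): what the 'SInt'
# scale format above produces for a few hypothetical tick positions.
# _format_sint is a hypothetical helper that mirrors the logic above.
def _format_sint(tickpos):
    if tickpos >= 1000000:
        return str(tickpos // 1000000) + " Mbp"
    elif tickpos >= 1000:
        return str(tickpos // 1000) + " Kbp"
    return str(tickpos)
assert _format_sint(1234567) == "1 Mbp"
assert _format_sint(45600) == "45 Kbp"
assert _format_sint(800) == "800"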
def draw_test_tracks(self):
""" draw_test_tracks(self)
Draw blue circles marking the top and bottom of each drawn track,
with a green circle at the track center.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
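# Illustrative note (not from the original module): with hypothetical
# values, a 10000 bp sequence and track.greytrack_labels=5 gives
# labelstep = 10000 // 5 = 2000, so the track name is repeated every
# 2000 bp around the circle.
labelstep = 10000 // 5 # 2000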
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
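# Illustrative sketch (not from the original module): angles here are
# measured clockwise from the vertical (12 o'clock) in radians, while
# reportlab's addArc measures anti-clockwise from the horizontal
# (3 o'clock) in degrees - hence the repeated 90 - angle*180/pi
# conversions below. Hypothetical numbers: a 1000 bp sequence drawn over
# the full circle (sweep=1, start=0).
from math import pi
length, sweep, start = 1000, 1.0, 0
base = 250
angle = sweep * 2 * pi * (base - start) / length # pi/2: a quarter turn clockwise, i.e. 3 o'clock
reportlab_degrees = 90 - (angle * 180 / pi) # 0.0 degrees, also 3 o'clock in reportlab's convention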
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
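# Illustrative note (not from the original module): the 0.01 radian
# cut-off above means the four-sided polygon shortcut is only used when
# the arc it replaces is tiny - e.g. at a radius of 300 pt an angle of
# 0.01 rad subtends an arc of only about 3 pt, so straight edges are
# visually indistinguishable from the true curve.
arc_length = 300 * 0.01 # radius * angle in radians = 3.0 pt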
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
#TODO - two straight lines are only a good approximation for small
#head angles; in general we will need curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
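# Illustrative sketch (not from the original module): how the arrow head
# angle above is derived, with hypothetical numbers. For a feature box
# 40 pt deep centred at radius 200 pt with head_length_ratio=0.5, the
# head occupies boxheight*ratio/middle_radius = 0.1 rad of arc (about
# 5.7 degrees), clamped so it never exceeds the feature itself.
boxheight, middle_radius, head_length_ratio = 40.0, 200.0, 0.5
angle = 0.5 # the feature spans half a radian
headangle_delta = max(0.0, min(abs(boxheight) * head_length_ratio / middle_radius, abs(angle)))
assert headangle_delta == 0.1
# orientation 'right': head starts at endangle - 0.1; 'left': startangle + 0.1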
|
draw_feature_set
|
draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, locstart, locend) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
# MASKED: draw_feature_set function (lines 340-359)
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# Calculate the radial position corresponding to the first data value
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0,self.end,tickiterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start//tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
                        angle = n * 1.0471975511965976  # 60 degrees (pi/3 radians) per step
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
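            # Example of the 'SInt' formatting above (illustrative only):
            # tickpos 1500000 -> "1 Mbp" (integer division truncates),
            # tickpos 75000 -> "75 Kbp", and tickpos 800 -> "800".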
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue circles indicating the tracks to be drawn, with a green
            circle marking the center of each track.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
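    # Illustrative example (not part of the original source): for a
    # full-circle diagram (sweep = 1) of length 1000 starting at base 0,
    # canvas_angle(250) returns an angle of pi/2 - a quarter turn clockwise
    # from the top of the diagram - together with cos(pi/2) ~ 0 and
    # sin(pi/2) = 1, which places that base at the 3 o'clock position.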
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
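            # Worked conversion (illustrative only): an arc from startangle = 0
            # to endangle = pi/2 (a quarter turn clockwise from 12 o'clock)
            # becomes addArc(..., 90 - 90, 90 - 0) = addArc(..., 0, 90), i.e.
            # reportlab sweeps anti-clockwise from 3 o'clock to 12 o'clock,
            # covering the same quadrant.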
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
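        # Illustrative numbers (not from the original source): with a box
        # height of 20 units, head_length_ratio = 0.5 and middle_radius = 100,
        # the arrow head spans 20 * 0.5 / 100 = 0.1 radians of arc, capped at
        # the full angle of the feature so the head never overshoots a very
        # short arrow.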
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines are only a good approximation for a
            #small head angle; in general curved lines will be needed here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
| 340 | 359 |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
    o tracklines  Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
        o circular  Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
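    # Worked example (illustrative only): with two drawn tracks of heights
    # 1 and 2, trackunit_sum = 3 and trackunit_height is one third of half
    # the smaller page dimension; track 1 then spans units 0-1 and track 2
    # spans units 1-3, each trimmed top and bottom by
    # trackunit_height * (1 - track_size) / 2 before its radii are stored.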
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
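    # Note on the label transform above (an illustrative reading, not part of
    # the original comments): the 2x2 part of labelgroup.transform is a
    # rotation matrix built from label_angle, so feature labels run radially;
    # for forward-strand and strandless features whose start lies in the
    # right-hand half of the circle (startangle < pi) the angle is recomputed
    # from endangle and the text is end-anchored, which keeps it from being
    # rendered upside down on that side of the diagram.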
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
        # Whichever is the greatest difference: max-midval or min-midval, is
        # taken to specify the number of pixel units resolved along the
        # y-axis
        resolution = max((midval-minval), (maxval-midval))
        if resolution == 0:  # guard against flat data, as in draw_bar_graph
            resolution = trackheight
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
        # Calculate the radial position of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
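        # Illustrative numbers (not from the original source): with ctr = 100,
        # trackheight = 20 and resolution = 5, a value 2.5 above midval maps
        # to a radius of 20 * 2.5 / 5 + 100 = 110, halfway between the centre
        # ring and the outer edge of the track.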
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
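        # Illustrative sketch (not part of the original comments): a value
        # above midval gives a positive barval, so the arc below runs outwards
        # from the centre ring (ctr to ctr + barval) in graph.poscolor, while
        # a value below midval gives a negative barval and an arc running
        # inwards in graph.negcolor.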
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
            # Calculate the heat color by interpolating between the graph's
            # positive and negative colors, according to where the value
            # falls between the maximum and minimum values
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
            #Note that we could just start the list of ticks using
            #range(0, self.end, tickiterval) and then filter out the
            #ones before self.start - but this seems wasteful.
            #Using tickiterval * (self.start // tickiterval) as the range
            #start is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
                        angle = n * 1.0471975511965976  # 60 degrees (pi/3 radians) per step
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue circles indicating the tracks to be drawn, with a green
            circle marking the center of each track.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines are only a good approximation for a
            #small head angle; in general curved lines will be needed here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
draw_feature
|
draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
    o tracklines  Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
        o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
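        # Illustrative sketch of the radius bookkeeping above (assumed, made-up
        # numbers, not from any real diagram): with two tracks of height 1 and
        # a drawable area whose smaller side is 400 pixels, trackunit_height is
        # 0.5*400/2 = 100 and, for track_size=0.75, track_crop is 12.5; track 1
        # then gets (btm, ctr, top) = (12.5, 50.0, 87.5) and track 2 gets
        # (112.5, 150.0, 187.5).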
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
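        # Note on the dispatch above: set_methods is keyed on the concrete set
        # class, so a FeatureSet is routed to draw_feature_set and a GraphSet
        # to draw_graph_set; supporting a new set type would mean adding a
        # matching entry here.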
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
# MASKED: draw_feature function (lines 362-384)
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
        # Calculate the radial position of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
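        # Worked sketch of the bar scaling above (illustrative numbers only):
        # for a track with btm=100, top=140 (so trackheight=20), data spanning
        # 0..10 and no explicit center, midval=5 and resolution=5; a value of
        # 10 gives an arc from ctr out to ctr+20 drawn in poscolor, a value of
        # 0 an arc from ctr in to ctr-20 drawn in negcolor.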
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
            #range(0,self.end,tickinterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
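        # Example of the 'SInt' tick formatting above (a sketch using the
        # integer division already performed here): tickpos=1500000 is labelled
        # "1 Mbp", tickpos=25000 is labelled "25 Kbp", and tickpos=500 is
        # labelled "500".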
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue circles indicating tracks to be drawn, with a green
            circle along the center of each track.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
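        # Minimal sketch of the mapping above (assumed values): with sweep=1,
        # start=0 and length=1000, base 250 gives angle = 2*pi*250/1000 = pi/2,
        # i.e. a quarter turn clockwise from the twelve o'clock position,
        # returned together with cos(pi/2) and sin(pi/2).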
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
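    # Note on the angle conversion used by _draw_arc and _draw_arc_arrow: this
    # drawer measures angles clockwise from the vertical, in radians, whereas
    # reportlab's addArc expects degrees measured anti-clockwise from the
    # horizontal - hence the repeated 90 - (angle * 180 / pi).  For example, an
    # angle of pi/2 (three o'clock here) becomes 90 - 90 = 0 degrees, which is
    # also three o'clock in reportlab's convention.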
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines is only a good approximation for small
            #head angles; in general we will need curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
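        # Sketch of the arrow-head sizing above (illustrative numbers only):
        # with inner_radius=100, outer_radius=120 and head_length_ratio=0.5,
        # boxheight is 20, middle_radius is 110 and headangle_delta is at most
        # 20*0.5/110 ~= 0.091 radians, clipped by min() to the angle actually
        # subtended so the head never overshoots the feature itself.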
|
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
| 362 | 384 |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
        o get_feature_sigil(self, feature, locstart, locend) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
        o _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
        o tracklines Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
o xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
        o circular Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
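        # How this method is normally reached (a hedged sketch, not part of
        # this module's own code path): user code builds a
        # GenomeDiagram.Diagram, calls diagram.draw(format='circular', ...),
        # which creates a CircularDrawer with the diagram as parent and invokes
        # this draw(); diagram.write(filename, 'PDF') then renders
        # self.drawing via reportlab.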
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
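        # Strand handling above, restated for clarity: for a track with radii
        # (btm, ctr, top), a strand=+1 feature occupies the outer half
        # (ctr..top) with orientation 'right', a strand=-1 feature the inner
        # half (btm..ctr) with orientation 'left', and a strandless feature
        # spans the full btm..top height.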
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
        # Calculate the radial position of the first data point
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
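        # Worked sketch of the radial mapping above (assumed numbers): with
        # btm=100, top=140 the half-height is 20; for data spanning 0..10 and
        # no explicit center, midval=5 and resolution=5, so a value of 10 is
        # plotted at ctr+20 (the track top), a value of 0 at ctr-20 (the track
        # bottom), and midval itself on the center ring ctr=120.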
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
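        # Sketch of the heat colouring above (illustrative values): with
        # minval=0 and maxval=10, linearlyInterpolatedColor(poscolor, negcolor,
        # maxval, minval, val) returns poscolor at val=10, negcolor at val=0
        # and an even blend at val=5, since the interpolation runs from maxval
        # down to minval.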
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
            #range(0,self.end,tickinterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue circles indicating the boundaries of the tracks to be
            drawn, with a green circle marking the center of each track.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines are only a good approximation for a small
            #head angle; in general curved lines will be needed here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
draw_graph_set
|
draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
        o tracklines    Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
            o xl        Float (0->1) describing the relative size of the left X
                        margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
            o circular      Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
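        # Worked example (hypothetical figures): with pagewidth == pageheight
        # == 600, two tracks each of height 1, and track_size == 0.75:
        #   trackunit_height = 0.5 * 600 / 2 = 150
        #   track_crop       = 150 * (1 - 0.75) / 2 = 18.75
        #   track 1 -> (btm, ctr, top) = (18.75,  75.0, 131.25)
        #   track 2 -> (btm, ctr, top) = (168.75, 225.0, 281.25)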
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
# MASKED: draw_graph_set function (lines 479-499)
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# We calculate the track height
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
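        # Worked example (hypothetical values): minval = 0, maxval = 10 and
        # graph.center = None give midval = 5 and resolution = 5, so a data
        # value of 7.5 is drawn as an arc 0.5 * trackheight high, extending
        # outwards from the central ring (ctr) in graph.poscolor.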
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
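        # With the linearlyInterpolatedColor call below, a value equal to
        # maxval maps to graph.poscolor, a value equal to minval maps to
        # graph.negcolor, and intermediate values are blended linearly.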
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
            #range(0,self.end,tickinterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
            Returns a tuple of (tick line element, tick label element or None)
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
def draw_test_tracks(self):
""" draw_test_tracks(self)
            Draw blue circles indicating the boundaries of the tracks to be
            drawn, with a green circle marking the center of each track.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
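        # For example (hypothetical figures): with sweep = 1, start = 0 and
        # length = 10000, base position 2500 gives an angle of pi/2, i.e. a
        # quarter turn clockwise from the twelve o'clock position.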
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
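            #For example, an angle of pi/2 here (a quarter turn clockwise from
            #the vertical, i.e. the three o'clock position) is passed to
            #reportlab as 90 - (pi/2 * 180 / pi) = 0 degrees from horizontal.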
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
            #TODO - two straight lines are only a good approximation for a small
            #head angle; in general curved lines will be needed here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
| 479 | 499 |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# Revisions copyright 2008-2009 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" CircularDrawer module
Provides:
o CircularDrawer - Drawing object for circular diagrams
For drawing capabilities, this module uses reportlab to draw and write
the diagram:
http://www.reportlab.com
For dealing with biological information, the package expects BioPython
objects:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.graphics.shapes import *
from reportlab.lib import colors
from reportlab.pdfbase import _fontdata
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from _AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from _FeatureSet import FeatureSet
from _GraphSet import GraphSet
from math import ceil, pi, cos, sin, asin
class CircularDrawer(AbstractDrawer):
""" CircularDrawer(AbstractDrawer)
Inherits from:
o AbstractDrawer
Provides:
Methods:
o __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1) Called on instantiation
o set_page_size(self, pagesize, orientation) Set the page size to the
passed size and orientation
o set_margins(self, x, y, xl, xr, yt, yb) Set the drawable area of the
page
o set_bounds(self, start, end) Set the bounds for the elements to be
drawn
o is_in_bounds(self, value) Returns a boolean for whether the position
is actually to be drawn
o __len__(self) Returns the length of sequence that will be drawn
o draw(self) Place the drawing elements on the diagram
o init_fragments(self) Calculate information
about sequence fragment locations on the drawing
o set_track_heights(self) Calculate information about the offset of
each track from the fragment base
o draw_test_tracks(self) Add lines demarcating each track to the
drawing
o draw_track(self, track) Return the contents of the passed track as
drawing elements
o draw_scale(self, track) Return a scale for the passed track as
drawing elements
o draw_greytrack(self, track) Return a grey background and superposed
label for the passed track as drawing
elements
o draw_feature_set(self, set) Return the features in the passed set as
drawing elements
o draw_feature(self, feature) Return a single feature as drawing
elements
o get_feature_sigil(self, feature, x0, x1, fragment) Return a single
feature as its sigil in drawing elements
o draw_graph_set(self, set) Return the data in a set of graphs as
drawing elements
o draw_line_graph(self, graph) Return the data in a graph as a line
graph in drawing elements
o draw_heat_graph(self, graph) Return the data in a graph as a heat
graph in drawing elements
o draw_bar_graph(self, graph) Return the data in a graph as a bar
graph in drawing elements
o canvas_angle(self, base) Return the angle, and cos and sin of
that angle, subtended by the passed
base position at the diagram center
o draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color) Return a drawable element describing an arc
Attributes:
        o tracklines    Boolean for whether to draw lines delineating tracks
o pagesize Tuple describing the size of the page in pixels
o x0 Float X co-ord for leftmost point of drawable area
o xlim Float X co-ord for rightmost point of drawable area
o y0 Float Y co-ord for lowest point of drawable area
o ylim Float Y co-ord for topmost point of drawable area
o pagewidth Float pixel width of drawable area
o pageheight Float pixel height of drawable area
o xcenter Float X co-ord of center of drawable area
o ycenter Float Y co-ord of center of drawable area
o start Int, base to start drawing from
o end Int, base to stop drawing at
o length Size of sequence to be drawn
o track_size Float (0->1) the proportion of the track height to
draw in
o drawing Drawing canvas
o drawn_tracks List of ints denoting which tracks are to be drawn
o current_track_level Int denoting which track is currently being
drawn
o track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a
fragment, keyed by track
o sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
"""
def __init__(self, parent=None, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1):
""" __init__(self, parent, pagesize='A3', orientation='landscape',
x=0.05, y=0.05, xl=None, xr=None, yt=None, yb=None,
start=None, end=None, tracklines=0, track_size=0.75,
circular=1)
o parent Diagram object containing the data that the drawer
draws
o pagesize String describing the ISO size of the image, or a tuple
of pixels
o orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
o x Float (0->1) describing the relative size of the X
margins to the page
o y Float (0->1) describing the relative size of the Y
margins to the page
            o xl        Float (0->1) describing the relative size of the left X
                        margin to the page (overrides x)
o xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
o yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
o yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
o start Int, the position to begin drawing the diagram at
o end Int, the position to stop drawing the diagram at
o tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
o track_size The proportion of the available track height that
should be taken up in drawing
            o circular      Boolean flag to show whether the passed sequence is
circular or not
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(self, parent, pagesize, orientation,
x, y, xl, xr, yt, yb, start, end,
tracklines)
# Useful measurements on the page
self.track_size = track_size
if circular == False: # Determine the proportion of the circumference
self.sweep = 0.9 # around which information will be drawn
else:
self.sweep = 1
def set_track_heights(self):
""" set_track_heights(self)
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Holds total number of 'units' taken up by all tracks
trackunits = {} # Holds start and end units for each track keyed by track number
heightholder = 0 # placeholder variable
for track in range(1, top_track+1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except:
trackheight = 1 # ...or default to 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder+trackheight)
heightholder += trackheight # move to next height
trackunit_height = 0.5*min(self.pagewidth, self.pageheight)/trackunit_sum
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = trackunit_height*(1-self.track_size)/2. # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1]*trackunit_height-track_crop
btm = trackunits[track][0]*trackunit_height+track_crop
ctr = btm+(top-btm)/2.
self.track_radii[track] = (btm, ctr, top)
def draw(self):
""" draw(self)
Draw a circular diagram of the stored data
"""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [greytrack_bgs, feature_elements,
scale_axes, scale_labels,
feature_labels, greytrack_labels
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines: # Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
""" draw_track(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Return tuple of (list of track elements, list of track labels)
"""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set,
GraphSet: self.draw_graph_set
}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
""" draw_feature_set(self, set) -> ([element, element,...], [element, element,...])
o set FeatureSet object
Returns a tuple (list of elements describing features, list of
labels for elements)
"""
#print 'draw feature set'
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
label_elements += labels
return feature_elements, label_elements
def draw_feature(self, feature):
""" draw_feature(self, feature, parent_feature=None) -> ([element, element,...], [element, element,...])
o feature Feature containing location info
Returns tuple of (list of elements describing single feature, list
of labels for those elements)
"""
feature_elements = [] # Holds drawable elements for a single feature
label_elements = [] # Holds labels for a single feature
if feature.hide: # Don't show feature: return early
return feature_elements, label_elements
# A single feature may be split into subfeatures, so loop over them
for locstart, locend in feature.locations:
# Get sigil for the feature/ each subfeature
feature_sigil, label = self.get_feature_sigil(feature, locstart, locend)
feature_elements.append(feature_sigil)
if label is not None: # If there's a label
label_elements.append(label)
return feature_elements, label_elements
def get_feature_sigil(self, feature, locstart, locend, **kwargs):
""" get_feature_sigil(self, feature, x0, x1, fragment) -> (element, element)
o feature Feature object
o locstart The start position of the feature
o locend The end position of the feature
Returns a drawable indicator of the feature, and any required label
for it
"""
# Establish the co-ordinates for the sigil
btm, ctr, top = self.track_radii[self.current_track_level]
startangle, startcos, startsin = self.canvas_angle(locstart)
endangle, endcos, endsin = self.canvas_angle(locend)
midangle, midcos, midsin = self.canvas_angle(float(locend+locstart)/2)
# Distribution dictionary for various ways of drawing the feature
# Each method takes the inner and outer radii, the start and end angle
# subtended at the diagram center, and the color as arguments
draw_methods = {'BOX': self._draw_arc,
'ARROW': self._draw_arc_arrow,
}
# Get sigil for the feature, location dependent on the feature strand
method = draw_methods[feature.sigil]
kwargs['head_length_ratio'] = feature.arrowhead_length
kwargs['shaft_height_ratio'] = feature.arrowshaft_height
#Support for clickable links... needs ReportLab 2.4 or later
#which added support for links in SVG output.
if hasattr(feature, "url") :
kwargs["hrefURL"] = feature.url
kwargs["hrefTitle"] = feature.name
if feature.color == colors.white:
border = colors.black
else:
border = feature.color
if feature.strand == 1:
sigil = method(ctr, top, startangle, endangle, feature.color,
border, orientation='right', **kwargs)
elif feature.strand == -1:
sigil = method(btm, ctr, startangle, endangle, feature.color,
border, orientation='left', **kwargs)
else:
sigil = method(btm, top, startangle, endangle, feature.color,
border, **kwargs)
if feature.label: # Feature needs a label
label = String(0, 0, feature.name.strip(),
fontName=feature.label_font,
fontSize=feature.label_size,
fillColor=feature.label_color)
labelgroup = Group(label)
label_angle = startangle + 0.5 * pi # Make text radial
sinval, cosval = startsin, startcos
if feature.strand != -1:
# Feature is on top, or covers both strands
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+top*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+top*cosval)
else:
# Feature on bottom strand
if startangle < pi: # Turn text round and anchor end to inner radius
sinval, cosval = endsin, endcos
label_angle = endangle - 0.5 * pi
else:
labelgroup.contents[0].textAnchor = 'end'
pos = self.xcenter+btm*sinval
coslabel = cos(label_angle)
sinlabel = sin(label_angle)
labelgroup.transform = (coslabel,-sinlabel,sinlabel,coslabel,
pos, self.ycenter+btm*cosval)
else:
labelgroup = None
#if locstart > locend:
# print locstart, locend, feature.strand, sigil, feature.name
#print locstart, locend, feature.name
return sigil, labelgroup
def draw_graph_set(self, set):
""" draw_graph_set(self, set) -> ([element, element,...], [element, element,...])
o set GraphSet object
Returns tuple (list of graph elements, list of graph labels)
"""
#print 'draw graph set'
elements = [] # Holds graph elements
# Distribution dictionary for how to draw the graph
style_methods = {'line': self.draw_line_graph,
'heat': self.draw_heat_graph,
'bar': self.draw_bar_graph
}
for graph in set.get_graphs():
#print graph.name
elements += style_methods[graph.style](graph)
return elements, []
def draw_line_graph(self, graph):
""" draw_line_graph(self, graph, center) -> [element, element,...]
o graph GraphData object
Returns a line graph as a list of drawable elements
"""
#print '\tdraw_line_graph'
line_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
# Start from first data point
pos, val = data[0]
lastangle, lastcos, lastsin = self.canvas_angle(pos)
# We calculate the track height
posheight = trackheight*(val-midval)/resolution + ctr
lastx = self.xcenter+posheight*lastsin # start xy coords
lasty = self.ycenter+posheight*lastcos
for pos, val in data:
posangle, poscos, possin = self.canvas_angle(pos)
posheight = trackheight*(val-midval)/resolution + ctr
x = self.xcenter+posheight*possin # next xy coords
y = self.ycenter+posheight*poscos
line_elements.append(Line(lastx, lasty, x, y,
strokeColor = graph.poscolor,
strokeWidth = graph.linewidth))
lastx, lasty, = x, y
return line_elements
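# Worked example (hypothetical values): for a track with btm = 100 and
# top = 160, trackheight = 30 and ctr = 130; if the data span 0..10 with no
# explicit center then midval = 5 and resolution = 5, so a value of 10 plots
# at posheight = 30 * (10 - 5) / 5 + 130 = 160 (the track top), a value of 0
# at 100 (the track bottom), and 5 sits on the center ring at 130.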
def draw_bar_graph(self, graph):
""" draw_bar_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for a bar graph of the passed
Graph object
"""
#print '\tdraw_bar_graph'
# At each point contained in the graph data, we draw a vertical bar
# from the track center to the height of the datapoint value (positive
# values go up in one color, negative go down in the alternative
# color).
bar_elements = []
# Set the number of pixels per unit for the data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = 0.5*(top-btm)
datarange = maxval - minval
if datarange == 0:
datarange = trackheight
data = graph[self.start:self.end]
# midval is the value at which the x-axis is plotted, and is the
# central ring in the track
if graph.center is None:
midval = (maxval + minval)/2.
else:
midval = graph.center
# Convert data into 'binned' blocks, covering half the distance to the
# next data point on either side, accounting for the ends of fragments
# and tracks
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Whichever is the greatest difference: max-midval or min-midval, is
# taken to specify the number of pixel units resolved along the
# y-axis
resolution = max((midval-minval), (maxval-midval))
if resolution == 0:
resolution = trackheight
# Create elements for the bar graph based on newdata
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
barval = trackheight*(val-midval)/resolution
if barval >=0:
barcolor = graph.poscolor
else:
barcolor = graph.negcolor
# Draw bar
bar_elements.append(self._draw_arc(ctr, ctr+barval, pos0angle,
pos1angle, barcolor))
return bar_elements
def draw_heat_graph(self, graph):
""" draw_heat_graph(self, graph) -> [element, element,...]
o graph Graph object
Returns a list of drawable elements for the heat graph
"""
#print '\tdraw_heat_graph'
# At each point contained in the graph data, we draw a box that is the
# full height of the track, extending from the midpoint between the
# previous and current data points to the midpoint between the current
# and next data points
heat_elements = [] # holds drawable elements
# Get graph data
data_quartiles = graph.quartiles()
minval, maxval = data_quartiles[0],data_quartiles[4]
midval = (maxval + minval)/2. # mid is the value at the X-axis
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-btm)
newdata = intermediate_points(self.start, self.end,
graph[self.start:self.end])
# Create elements on the graph, indicating a large positive value by
# the graph's poscolor, and a large negative value by the graph's
# negcolor attributes
for pos0, pos1, val in newdata:
pos0angle, pos0cos, pos0sin = self.canvas_angle(pos0)
pos1angle, pos1cos, pos1sin = self.canvas_angle(pos1)
# Calculate the heat color, based on the differential between
# the value and the median value
heat = colors.linearlyInterpolatedColor(graph.poscolor,
graph.negcolor,
maxval, minval, val)
# Draw heat box
heat_elements.append(self._draw_arc(btm, top, pos0angle, pos1angle,
heat, border=heat))
return heat_elements
def draw_scale(self, track):
""" draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale)
"""
scale_elements = [] # holds axes and ticks
scale_labels = [] # holds labels
if not track.scale: # no scale required, exit early
return [], []
# Get track locations
btm, ctr, top = self.track_radii[self.current_track_level]
trackheight = (top-ctr)
# X-axis
if self.sweep < 1:
#Draw an arc, leaving out the wedge
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#in degrees.
p.addArc(self.xcenter, self.ycenter, ctr,
startangledegrees=90-360*self.sweep,
endangledegrees=90)
scale_elements.append(p)
del p
else:
#Draw a full circle
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=track.scale_color,
fillColor=None))
if track.scale_ticks: # Ticks are required on the scale
# Draw large ticks
#I want the ticks to be consistently positioned relative to
#the start of the sequence (position 0), not relative to the
#current viewpoint (self.start and self.end)
ticklen = track.scale_largeticks * trackheight
tickiterval = int(track.scale_largetick_interval)
#Note that we could just start the list of ticks using
#range(0, self.end, tickiterval) and then filter out the
#ones before self.start - but this seems wasteful.
#Using tickiterval * (self.start/tickiterval) is a shortcut.
largeticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in largeticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_largetick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Draw small ticks
ticklen = track.scale_smallticks * trackheight
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos \
in range(tickiterval * (self.start//tickiterval),
int(self.end),
tickiterval) \
if pos >= self.start]
for tickpos in smallticks:
tick, label = self.draw_tick(tickpos, ctr, ticklen,
track,
track.scale_smalltick_labels)
scale_elements.append(tick)
if label is not None: # If there's a label, add it
scale_labels.append(label)
# Check to see if the track contains a graph - if it does, get the
# minimum and maximum values, and put them on the scale Y-axis
# at 60 degree intervals, ordering the labels by graph_id
if track.axis_labels:
for set in track.get_sets():
if set.__class__ is GraphSet:
# Y-axis
for n in xrange(7):
angle = n * 1.0471975511965976
ticksin, tickcos = sin(angle), cos(angle)
x0, y0 = self.xcenter+btm*ticksin, self.ycenter+btm*tickcos
x1, y1 = self.xcenter+top*ticksin, self.ycenter+top*tickcos
scale_elements.append(Line(x0, y0, x1, y1,
strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
minval, maxval = quartiles[0], quartiles[4]
if graph.center is None:
midval = (maxval + minval)/2.
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
graph_label_mid.append("%.3f" % midval)
else:
diff = max((graph.center-minval),
(maxval-graph.center))
minval = graph.center-diff
maxval = graph.center+diff
midval = graph.center
graph_label_mid.append("%.3f" % midval)
graph_label_min.append("%.3f" % minval)
graph_label_max.append("%.3f" % maxval)
xmid, ymid = (x0+x1)/2., (y0+y1)/2.
for limit, x, y, in [(graph_label_min, x0, y0),
(graph_label_max, x1, y1),
(graph_label_mid, xmid, ymid)]:
label = String(0, 0, ";".join(limit),
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, -ticksin,
ticksin, tickcos,
x, y)
scale_labels.append(labelgroup)
return scale_elements, scale_labels
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
""" draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale
"""
# Calculate tick co-ordinates
tickangle, tickcos, ticksin = self.canvas_angle(tickpos)
x0, y0 = self.xcenter+ctr*ticksin, self.ycenter+ctr*tickcos
x1, y1 = self.xcenter+(ctr+ticklen)*ticksin, self.ycenter+(ctr+ticklen)*tickcos
# Calculate height of text label so it can be offset on lower half
# of diagram
# LP: not used, as not all fonts have ascent_descent data in reportlab.pdfbase._fontdata
#label_offset = _fontdata.ascent_descent[track.scale_font][0]*\
# track.scale_fontsize/1000.
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label: # Put tick position on as label
if track.scale_format == 'SInt':
if tickpos >= 1000000:
tickstring = str(tickpos//1000000) + " Mbp"
elif tickpos >= 1000:
tickstring = str(tickpos//1000) + " Kbp"
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, # Make label string
fontName=track.scale_font,
fontSize=track.scale_fontsize,
fillColor=track.scale_color)
if tickangle > pi:
label.textAnchor = 'end'
# LP: This label_offset depends on ascent_descent data, which is not available for all
# fonts, so has been deprecated.
#if 0.5*pi < tickangle < 1.5*pi:
# y1 -= label_offset
labelgroup = Group(label)
labelgroup.transform = (1,0,0,1, x1, y1)
else:
labelgroup = None
return tick, labelgroup
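# Example of the 'SInt' tick label format used above (illustrative):
# tickpos = 2500000 -> "2 Mbp" and tickpos = 1500 -> "1 Kbp" (integer
# division, so values are truncated rather than rounded), while
# tickpos = 750 -> "750".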
def draw_test_tracks(self):
""" draw_test_tracks(self)
Draw blue circles indicating the top and bottom of each track to be
drawn, with a green circle marking the center of each track.
"""
#print 'drawing test tracks'
# Add lines only for drawn tracks
for track in self.drawn_tracks:
btm, ctr, top = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top,
strokeColor=colors.blue,
fillColor=None)) # top line
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr,
strokeColor=colors.green,
fillColor=None)) # middle line
self.drawing.add(Circle(self.xcenter, self.ycenter, btm,
strokeColor=colors.blue,
fillColor=None)) # bottom line
def draw_greytrack(self, track):
""" draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should
"""
greytrack_bgs = [] # Holds track backgrounds
greytrack_labels = [] # Holds track foreground labels
if not track.greytrack: # No greytrack required, return early
return [], []
# Get track location
btm, ctr, top = self.track_radii[self.current_track_level]
# Make background
if self.sweep < 1:
#Make a partial circle, a large arc box
#This method assumes the correct center for us.
bg = self._draw_arc(btm, top, 0, 2*pi*self.sweep,
colors.Color(0.96, 0.96, 0.96))
else:
#Make a full circle (using a VERY thick linewidth)
bg = Circle(self.xcenter, self.ycenter, ctr,
strokeColor = colors.Color(0.96, 0.96, 0.96),
fillColor=None, strokeWidth=top-btm)
greytrack_bgs.append(bg)
if track.greytrack_labels: # Labels are required for this track
labelstep = self.length//track.greytrack_labels # label interval
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, # Add a new label at
fontName=track.greytrack_font, # each interval
fontSize=track.greytrack_fontsize,
fillColor=track.greytrack_fontcolor)
theta, costheta, sintheta = self.canvas_angle(pos)
x,y = self.xcenter+btm*sintheta, self.ycenter+btm*costheta # start text halfway up marker
labelgroup = Group(label)
labelangle = self.sweep*2*pi*(pos-self.start)/self.length - pi/2
if theta > pi:
label.textAnchor = 'end' # Anchor end of text to inner radius
labelangle += pi # and reorient it
cosA, sinA = cos(labelangle), sin(labelangle)
labelgroup.transform = (cosA, -sinA, sinA,
cosA, x, y)
if not self.length-x <= labelstep: # Don't overrun the circle
greytrack_labels.append(labelgroup)
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
""" canvas_angle(self, base) -> (float, float, float)
"""
angle = self.sweep*2*pi*(base-self.start)/self.length
return (angle, cos(angle), sin(angle))
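# Worked example (hypothetical values): with sweep = 1, start = 0 and
# length = 1000, canvas_angle(250) returns approximately (1.571, 0.0, 1.0),
# i.e. an angle of pi/2; since points are plotted at
# (xcenter + r*sin, ycenter + r*cos), the 25% position sits a quarter turn
# clockwise from the top of the circle.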
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle,
color, border=None, colour=None, **kwargs):
""" draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used.
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
if abs(float(endangle - startangle))>.01:
# Wide arc, must use full curves
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
strokewidth=0)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, inner_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius,
90 - (endangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.closePath()
return p
else:
#Cheat and just use a four sided polygon.
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x3,y3 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x4,y4 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
return draw_polygon([(x1,y1),(x2,y2),(x3,y3),(x4,y4)], color, border)
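# Note on the angle conversion used above (illustrative): an angle theta
# measured here in radians clockwise from the vertical maps onto reportlab's
# convention (degrees anti-clockwise from the horizontal) as
# 90 - theta * 180 / pi, so theta = 0 (12 o'clock) -> 90 degrees,
# theta = pi/2 (3 o'clock) -> 0 degrees and theta = pi (6 o'clock) -> -90 degrees.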
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle,
color, border=None,
shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right',
colour=None, **kwargs):
"""Draw an arrow along an arc."""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if border is None:
border = color
if color is None:
color = colour
if color == colors.white and border is None: # Force black border on
strokecolor = colors.black # white boxes with
elif border is None: # undefined border, else
strokecolor = color # use fill colour
elif border is not None:
strokecolor = border
#if orientation == 'right':
# startangle, endangle = min(startangle, endangle), max(startangle, endangle)
#elif orientation == 'left':
# startangle, endangle = max(startangle, endangle), min(startangle, endangle)
#else:
startangle, endangle = min(startangle, endangle), max(startangle, endangle)
if orientation != "left" and orientation != "right":
raise ValueError("Invalid orientation %s, should be 'left' or 'right'" \
% repr(orientation))
angle = float(endangle - startangle) # angle subtended by arc
middle_radius = 0.5*(inner_radius+outer_radius)
boxheight = outer_radius - inner_radius
shaft_height = boxheight*shaft_height_ratio
shaft_inner_radius = middle_radius - 0.5*shaft_height
shaft_outer_radius = middle_radius + 0.5*shaft_height
headangle_delta = max(0.0,min(abs(boxheight)*head_length_ratio/middle_radius, abs(angle)))
if angle < 0:
headangle_delta *= -1 #reverse it
if orientation=="right":
headangle = endangle-headangle_delta
else:
headangle = startangle+headangle_delta
if startangle <= endangle:
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert startangle <= headangle <= endangle \
or endangle <= headangle <= startangle, \
(startangle, headangle, endangle, angle)
# Calculate trig values for angle and coordinates
startcos, startsin = cos(startangle), sin(startangle)
headcos, headsin = cos(headangle), sin(headangle)
endcos, endsin = cos(endangle), sin(endangle)
x0,y0 = self.xcenter, self.ycenter # origin of the circle
if 0.5 >= abs(angle) and abs(headangle_delta) >= abs(angle):
#If the angle is small, and the arrow is all head,
#cheat and just use a triangle.
if orientation=="right":
x1,y1 = (x0+inner_radius*startsin, y0+inner_radius*startcos)
x2,y2 = (x0+outer_radius*startsin, y0+outer_radius*startcos)
x3,y3 = (x0+middle_radius*endsin, y0+middle_radius*endcos)
else:
x1,y1 = (x0+inner_radius*endsin, y0+inner_radius*endcos)
x2,y2 = (x0+outer_radius*endsin, y0+outer_radius*endcos)
x3,y3 = (x0+middle_radius*startsin, y0+middle_radius*startcos)
#return draw_polygon([(x1,y1),(x2,y2),(x3,y3)], color, border,
# stroke_line_join=1)
return Polygon([x1,y1,x2,y2,x3,y3],
strokeColor=border or color,
fillColor=color,
strokeLineJoin=1, #1=round, not mitre!
strokewidth=0)
elif orientation=="right":
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (headangle * 180 / pi), 90 - (startangle * 180 / pi),
reverse=True)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*endsin, y0+middle_radius*endcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(endangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor,
fillColor=color,
#default is mitre/miter which can stick out too much:
strokeLineJoin=1, #1=round
strokewidth=0,
**kwargs)
#Note reportlab counts angles anti-clockwise from the horizontal
#(as in mathematics, e.g. complex numbers and polar coordinates)
#but we use clockwise from the vertical. Also reportlab uses
#degrees, but we use radians.
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius,
90 - (endangle * 180 / pi), 90 - (headangle * 180 / pi),
reverse=False)
p.lineTo(x0+outer_radius*headsin, y0+outer_radius*headcos)
#TODO - two straight lines are only a good approximation for a small
#head angle; in general we will need curved lines here:
if abs(angle) < 0.5:
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
else:
dx = min(0.1, abs(angle)/50.0) #auto-scale number of steps
x = dx
while x < 1:
r = outer_radius - x*(outer_radius-middle_radius)
a = headangle + x*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+middle_radius*startsin, y0+middle_radius*startcos)
x = dx
while x < 1:
r = middle_radius - x*(middle_radius-inner_radius)
a = headangle + (1-x)*(startangle-headangle)
p.lineTo(x0+r*sin(a), y0+r*cos(a))
x += dx
p.lineTo(x0+inner_radius*headsin, y0+inner_radius*headcos)
p.closePath()
return p
|
old_style
|
Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
|
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import re
import sys
from template import util
from template.constants import *
from template.directive import Directive
from template.grammar import Grammar
from template.util import TemplateException
"""
template.parser - LALR(1) parser for compiling template documents
SYNOPSIS
import template.parser
parser = template.parser.Parser(config)
template = parser.parse(text)
DESCRIPTION
The template.parser module implements a LALR(1) parser and associated
methods for parsing template documents into Python code.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.parser.Parser object. A
dictionary may be supplied as a parameter to provide configuration
values. These may include:
* START_TAG, END_TAG
The START_TAG and END_TAG options are used to specify character
sequences or regular expressions that mark the start and end of a
template directive. The default values for START_TAG and END_TAG are
'[%' and '%]' respectively, giving us the familiar directive style:
[% example %]
Any Python regex characters can be used and therefore should be
escaped (or use the re.escape function) if they are intended to
represent literal characters.
parser = template.parser.Parser({
'START_TAG': re.escape('<+'),
'END_TAG': re.escape('+>'),
})
example:
<+ INCLUDE foobar +>
The TAGS directive can also be used to set the START_TAG and END_TAG values
on a per-template file basis.
[% TAGS <+ +> %]
* TAG_STYLE
The TAG_STYLE option can be used to set both START_TAG and END_TAG
according to pre-defined tag styles.
parser = template.parser.Parser({
'TAG_STYLE': 'star',
})
Available styles are:
template [% ... %] (default)
template1 [% ... %] or %% ... %% (TT version 1)
metatext %% ... %% (Text::MetaText)
star [* ... *] (TT alternate)
php <? ... ?> (PHP)
asp <% ... %> (ASP)
mason <% ... > (HTML::Mason)
html <!-- ... --> (HTML comments)
Any values specified for START_TAG and/or END_TAG will over-ride those
defined by a TAG_STYLE.
The TAGS directive may also be used to set a TAG_STYLE
[% TAGS html %]
<!-- INCLUDE header -->
* PRE_CHOMP, POST_CHOMP
Anything outside a directive tag is considered plain text and is
generally passed through unaltered (but see the INTERPOLATE option).
This includes all whitespace and newline characters surrounding
directive tags. Directives that don't generate any output will leave
gaps in the output document.
Example:
Foo
[% a = 10 %]
Bar
Output:
Foo
Bar
The PRE_CHOMP and POST_CHOMP options can help to clean up some of this
extraneous whitespace. Both are disabled by default.
parser = template.parser.Parser({
'PRE_CHOMP': 1,
'POST_CHOMP': 1,
})
With PRE_CHOMP set to 1, the newline and whitespace preceding a
directive at the start of a line will be deleted. This has the effect
of concatenating a line that starts with a directive onto the end of
the previous line.
Foo E<lt>----------.
|
,---(PRE_CHOMP)----'
|
`-- [% a = 10 %] --.
|
,---(POST_CHOMP)---'
|
`-E<gt> Bar
With POST_CHOMP set to 1, any whitespace after a directive up to and
including the newline will be deleted. This has the effect of joining
a line that ends with a directive onto the start of the next line.
If PRE_CHOMP or POST_CHOMP is set to 2, all whitespace including any
number of newlines will be removed and replaced with a single space.
This is useful for HTML, where (usually) a contiguous block of
whitespace is rendered the same as a single space.
With PRE_CHOMP or POST_CHOMP set to 3, all adjacent whitespace
(including newlines) will be removed entirely.
These values are defined as CHOMP_NONE, CHOMP_ONE, CHOMP_COLLAPSE and
CHOMP_GREEDY constants in the template.constants module. CHOMP_ALL
is also defined as an alias for CHOMP_ONE to provide backwards
compatibility with earlier versions of the Template Toolkit.
Additionally the chomp tag modifiers listed below may also be used for
the PRE_CHOMP and POST_CHOMP configuration.
tt = template.Template({
'PRE_CHOMP': '~',
'POST_CHOMP': '-',
})
PRE_CHOMP and POST_CHOMP can be activated for individual directives by
placing a '-' immediately at the start and/or end of the directive.
[% FOREACH user IN userlist %]
[%- user -%]
[% END %]
This has the same effect as CHOMP_ONE in removing all whitespace
before or after the directive up to and including the newline. The
template will be processed as if written:
[% FOREACH user IN userlist %][% user %][% END %]
To remove all whitespace including any number of newlines, use the '~'
character instead.
[% FOREACH user IN userlist %]
[%~ user ~%]
[% END %]
To collapse all whitespace to a single space, use the '=' character.
[% FOREACH user IN userlist %]
[%= user =%]
[% END %]
Here the template is processed as if written:
[% FOREACH user IN userlist %] [% user %] [% END %]
If you have PRE_CHOMP or POST_CHOMP set as configuration options then
you can use '+' to disable any chomping options (i.e. leave the
whitespace intact) on a per-directive basis.
[% FOREACH user = userlist %]
User: [% user +%]
[% END %]
With POST_CHOMP set to CHOMP_ONE, the above example would be parsed as
if written:
[% FOREACH user = userlist %]User: [% user %]
[% END %]
For reference, the PRE_CHOMP and POST_CHOMP configuration options may be set to any of the following:
Constant Value Tag Modifier
----------------------------------
CHOMP_NONE 0 +
CHOMP_ONE 1 -
CHOMP_COLLAPSE 2 =
CHOMP_GREEDY 3 ~
* INTERPOLATE
The INTERPOLATE flag, when set to any true value will cause variable
references in plain text (i.e. not surrounded by START_TAG and
END_TAG) to be recognised and interpolated accordingly.
parser = template.parser.Parser({
'INTERPOLATE': 1,
})
Variables should be prefixed by a '$' to identify them. Curly braces
can be used in the familiar Perl/shell style to explicitly scope the
variable name where required.
# INTERPOLATE => 0
<a href="http://[% server %]/[% help %]">
<img src="[% images %]/help.gif"></a>
[% myorg.name %]
# INTERPOLATE => 1
<a href="http://$server/$help">
<img src="$images/help.gif"></a>
$myorg.name
# explicit scoping with { }
<img src="$images/${icon.next}.gif">
Note that a limitation in Perl's regex engine restricts the maximum
length of an interpolated template to around 32 kilobytes or possibly
less. Files that exceed this limit in size will typically cause Perl
to dump core with a segmentation fault. If you routinely process
templates of this size then you should disable INTERPOLATE or split
the templates into several smaller files or blocks which can then be
joined back together via PROCESS or INCLUDE.
It is unknown whether this limitation is shared by the Python regex
engine.
* ANYCASE
By default, directive keywords should be expressed in UPPER CASE. The
ANYCASE option can be set to allow directive keywords to be specified
in any case.
# ANYCASE => 0 (default)
[% INCLUDE foobar %] # OK
[% include foobar %] # ERROR
[% include = 10 %] # OK, 'include' is a variable
# ANYCASE => 1
[% INCLUDE foobar %] # OK
[% include foobar %] # OK
[% include = 10 %] # ERROR, 'include' is a reserved word
One side-effect of enabling ANYCASE is that you cannot use a variable
of the same name as a reserved word, regardless of case. The reserved
words are currently:
GET CALL SET DEFAULT INSERT INCLUDE PROCESS WRAPPER
IF UNLESS ELSE ELSIF FOR FOREACH WHILE SWITCH CASE
USE PLUGIN FILTER MACRO PYTHON RAWPYTHON BLOCK META
TRY THROW CATCH FINAL NEXT LAST BREAK RETURN STOP
CLEAR TO STEP AND OR NOT MOD DIV END
The only lower case reserved words that cannot be used for variables,
regardless of the ANYCASE option, are the operators:
and or not mod div
* V1DOLLAR
In version 1 of the Template Toolkit, an optional leading '$' could be placed
on any template variable and would be silently ignored.
# VERSION 1
[% $foo %] === [% foo %]
[% $hash.$key %] === [% hash.key %]
To interpolate a variable value the '${' ... '}' construct was used.
Typically, one would do this to index into a hash array when the key
value was stored in a variable.
example:
vars = {
'users': {
'aba': { 'name': 'Alan Aardvark', ... },
'abw': { 'name': 'Andy Wardley', ... },
...
},
'uid': 'aba',
...
}
template.process('user/home.html', vars)
'user/home.html':
[% user = users.${uid} %] # users.aba
Name: [% user.name %] # Alan Aardvark
This was inconsistent with double quoted strings and also the
INTERPOLATE mode, where a leading '$' in text was enough to indicate a
variable for interpolation, and the additional curly braces were used
to delimit variable names where necessary. Note that this use is
consistent with UNIX and Perl conventions, among others.
# double quoted string interpolation
[% name = "$title ${user.name}" %]
# INTERPOLATE = 1
<img src="$images/help.gif"></a>
<img src="$images/${icon.next}.gif">
For version 2, these inconsistencies have been removed and the syntax
clarified. A leading '$' on a variable is now used exclusively to
indicate that the variable name should be interpolated
(e.g. substituted for its value) before being used. The earlier example
from version 1:
# VERSION 1
[% user = users.${uid} %]
Name: [% user.name %]
can now be simplified in version 2 as:
# VERSION 2
[% user = users.$uid %]
Name: [% user.name %]
The leading dollar is no longer ignored and has the same effect of
interpolation as '${' ... '}' in version 1. The curly braces may
still be used to explicitly scope the interpolated variable name
where necessary.
e.g.
[% user = users.${me.id} %]
Name: [% user.name %]
The rule applies for all variables, both within directives and in
plain text if processed with the INTERPOLATE option. This means that
you should no longer (if you ever did) add a leading '$' to a variable
inside a directive, unless you explicitly want it to be interpolated.
One obvious side-effect is that any version 1 templates with variables
using a leading '$' will no longer be processed as expected. Given
the following variable definitions,
[% foo = 'bar'
bar = 'baz'
%]
version 1 would interpret the following as:
# VERSION 1
[% $foo %] => [% GET foo %] => bar
whereas version 2 interprets it as:
# VERSION 2
[% $foo %] => [% GET $foo %] => [% GET bar %] => baz
In version 1, the '$' is ignored and the value for the variable 'foo'
is retrieved and printed. In version 2, the variable '$foo' is first
interpolated to give the variable name 'bar' whose value is then
retrieved and printed.
The use of the optional '$' has never been strongly recommended, but
to assist in backwards compatibility with any version 1 templates that
may rely on this "feature", the V1DOLLAR option can be set to 1
(default: 0) to revert the behaviour and have leading '$' characters
ignored.
parser = template.parser.Parser({
'V1DOLLAR': 1,
})
* GRAMMAR
The GRAMMAR configuration item can be used to specify an alternate
grammar for the parser. This allows a modified or entirely new
template language to be constructed and used by the Template Toolkit.
Source templates are compiled to Python code by the template.parser
module using the template.grammar module (by default) to define the
language structure and semantics. Compiled templates are thus
inherently "compatible" with each other and there is nothing to prevent
any number of different template languages being compiled and used within
the same Template Toolkit processing environment (other than the usual
time and memory constraints).
The template.grammar file is constructed from a YACC like grammar
(using Parse::YAPP) and a skeleton module template. These files are
provided, along with a small script to rebuild the grammar, in the
'parser' sub-directory of the distribution. You don't have to know or
worry about these unless you want to hack on the template language or
define your own variant. There is a README file in the same directory
which provides some small guidance but it is assumed that you know
what you're doing if you venture herein. If you grok LALR parsers,
then you should find it comfortably familiar.
By default, an instance of the default template.grammar.Grammar will
be created and used automatically if a GRAMMAR item isn't specified.
import myorg.template.grammar
parser = template.parser.Parser({
'GRAMMAR': myorg.template.grammar.Grammar(),
})
* DEBUG
The DEBUG option can be used to enable various debugging features of
the template.parser module.
from template.constants import *
tt = template.Template({
'DEBUG': DEBUG_PARSER | DEBUG_DIRS,
})
The DEBUG value can include any of the following. Multiple values
should be combined using the logical OR operator, '|'.
** DEBUG_PARSER
This flag causes the Parser to generate debugging messages that show
the Python code generated by parsing and compiling each template.
** DEBUG_DIRS
This option causes the Template Toolkit to generate comments
indicating the source file, line and original text of each directive
in the template. These comments are embedded in the template output
using the format defined in the DEBUG_FORMAT configuration item, or a
simple default format if unspecified.
For example, the following template fragment:
Hello
World
would generate this output:
## input text line 1 : ##
Hello
## input text line 2 : World ##
World
parse(text)
The parse() method parses the text passed in the first parameter and
returns a dictionary of data defining the compiled representation of
the template text, suitable for passing to the
template.document.Document constructor.
Example:
data = parser.parse(text)
The data dictionary returned contains a BLOCK item containing the
compiled Python code for the template, a DEFBLOCKS item containing a
dictionary of sub-template BLOCKs defined within the template, and
a METADATA item containing a dictionary of metadata values defined in
META tags.
"""
CONTINUE = 0
ACCEPT = 1
ERROR = 2
ABORT = 3
TAG_STYLE = {
"default": (r"\[%", r"%\]"),
"template1": (r"[[%]%", r"%[]%]"),
"metatext": (r"%%", r"%%"),
"html": (r"<!--", r"-->"),
"mason": (r"<%", r">"),
"asp": (r"<%", r"%>"),
"php": (r"<\?", r"\?>"),
"star": (r"\[\*", r"\*\]"),
}
TAG_STYLE["template"] = TAG_STYLE["tt2"] = TAG_STYLE["default"]
DEFAULT_STYLE = {
"START_TAG": TAG_STYLE["default"][0],
"END_TAG": TAG_STYLE["default"][1],
"ANYCASE": 0,
"INTERPOLATE": 0,
"PRE_CHOMP": 0,
"POST_CHOMP": 0,
"V1DOLLAR": 0,
"EVAL_PYTHON": 0,
}
ESCAPE = {"n": "\n", "r": "\r", "t": "\t"}
CHOMP_FLAGS = r"[-=~+]"
CHOMP_ALL = str(CHOMP_ALL)
CHOMP_COLLAPSE = str(CHOMP_COLLAPSE)
CHOMP_GREEDY = str(CHOMP_GREEDY)
CHOMP_NONE = str(CHOMP_NONE)
CHOMP_CONST = {
"-": CHOMP_ALL,
"=": CHOMP_COLLAPSE,
"~": CHOMP_GREEDY,
"+": CHOMP_NONE
}
PRE_CHOMP = {
CHOMP_ALL: lambda x: re.sub(r"(\n|^)[^\S\n]*\Z", "", x),
CHOMP_COLLAPSE: lambda x: re.sub(r"\s+\Z", " ", x),
CHOMP_GREEDY: lambda x: re.sub(r"\s+\Z", "", x),
CHOMP_NONE: lambda x: x,
}
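# Illustrative behaviour of the PRE_CHOMP handlers above:
# PRE_CHOMP[CHOMP_ALL]("Foo\n  ")      -> "Foo"  (strip back to the last newline)
# PRE_CHOMP[CHOMP_COLLAPSE]("Foo\n  ") -> "Foo " (collapse to a single space)
# PRE_CHOMP[CHOMP_GREEDY]("Foo\n\n  ") -> "Foo"  (strip all trailing whitespace)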
def postchomp(regex, prefix):
regex = re.compile(regex)
def strip(text, postlines):
match = regex.match(text)
if match:
text = prefix + text[match.end():]
postlines += match.group().count("\n")
return text, postlines
return strip
POST_CHOMP = {
CHOMP_ALL: postchomp(r"[^\S\n]*\n", ""),
CHOMP_COLLAPSE: postchomp(r"\s+", " "),
CHOMP_GREEDY: postchomp(r"\s+", ""),
CHOMP_NONE: lambda x, y: (x, y),
}
def Chomp(x):
return re.sub(r"[-=~+]", lambda m: CHOMP_CONST[m.group()], str(x))
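# Illustrative: Chomp() maps the tag modifier characters onto the chomp
# constants (as strings), so assuming the usual values from
# template.constants (NONE=0, ALL/ONE=1, COLLAPSE=2, GREEDY=3):
# Chomp("-") -> "1", Chomp("+") -> "0", Chomp("~") -> "3", Chomp(2) -> "2".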
GRAMMAR = re.compile(r"""
# strip out any comments
(\#[^\n]*)
|
# a quoted string matches in $3
(["']) # $2 - opening quote, ' or "
( # $3 - quoted text buffer
(?: # repeat group (no backreference)
\\\\ # an escaped backslash
| # ...or...
\\\2 # an escaped quote \" or \' (match $1)
| # ...or...
. # any other character
| \n
)*? # non-greedy repeat
) # end of $3
\2 # match opening quote
|
# an unquoted number matches in $4
(-? \d+ (?: \. \d+ )?) # numbers
|
# filename matches in $5
( /? \w+ (?: (?: /|::? ) \w* )+ | /\w+ )
|
# an identifier matches in $6
(\w+)
|
# an unquoted word or symbol matches in $7
( [(){}\[\]:;,/\\] # misc parentheses and symbols
| -> # arrow operator (for future?)
| [-+*] # math operations
| \${? # dollar with optional left brace
| => # like "="
| [=!<>]?= | [!<>] # equality tests
| &&? | \|\|? # boolean ops
| \.\.? # n..n sequence
| \S+ # something unquoted
) # end of $7
""", re.VERBOSE)
QUOTED_STRING = re.compile(r"""
( (?: \\. | [^\$] ){1,3000} ) # escaped or non-'$' character [$1]
|
( \$ (?: # embedded variable [$2]
(?: \{ ([^\}]*) \} ) # ${ ... } [$3]
|
([\w\.]+) # $word [$4]
)
)
""", re.VERBOSE)
class Error(Exception):
"""A trivial local exception class."""
pass
class Parser:
"""This module implements a LALR(1) parser and assocated support
methods to parse template documents into the appropriate "compiled"
format.
"""
def __init__(self, param):
self.start_tag = param.get("START_TAG") or DEFAULT_STYLE["START_TAG"]
self.end_tag = param.get("END_TAG") or DEFAULT_STYLE["END_TAG"]
self.tag_style = param.get("TAG_STYLE", "default")
self.anycase = param.get("ANYCASE", False)
self.interpolate = param.get("INTERPOLATE", False)
self.pre_chomp = param.get("PRE_CHOMP", CHOMP_NONE)
self.post_chomp = param.get("POST_CHOMP", CHOMP_NONE)
self.v1dollar = param.get("V1DOLLAR", False)
self.eval_python = param.get("EVAL_PYTHON", False)
self.file_info = param.get("FILE_INFO", 1)
self.grammar = param.get("GRAMMAR", Grammar())
self.factory = param.get("FACTORY", Directive)
self.fileinfo = []
self.defblocks = []
self.defblock_stack = []
self.infor = 0
self.inwhile = 0
self.style = []
# If the FACTORY parameter is callable (e.g. a class), instantiate it
# with the configuration so that any NAMESPACE definitions are included;
# otherwise assume it is already a constructed factory object.
if isinstance(self.factory, collections.Callable):
self.factory = self.factory(param)
self.lextable = self.grammar.lextable
self.states = self.grammar.states
self.rules = self.grammar.rules
self.new_style(param)
self.tokenize = (
((1,), self._comment),
((2, 3), self._string),
((4,), self._number),
((5,), self._filename),
((6,), self._identifier),
((7,), self._word),
)
def new_style(self, config):
"""Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
"""
if self.style:
style = self.style[-1]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get("TAG_STYLE")
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if tags is None:
raise Error("Invalid tag style: %s" % tagstyle)
start, end = tags
config["START_TAG"] = config.get("START_TAG", start)
config["END_TAG"] = config.get("END_TAG", end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if value is not None:
style[key] = value
self.style.append(style)
return style
# MASKED: old_style function (lines 703-711)
def location(self):
"""Return Python comment indicating current parser file and line."""
if not self.file_info:
return "\n"
line = self.line
info = self.fileinfo[-1]
file = info and (info.path or info.name) or "(unknown template)"
line = re.sub(r"-.*", "", str(line)) # might be 'n-n'
return '#line %s "%s"\n' % (line, file)
def parse(self, text, info=None):
"""Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
"""
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if tokens is None:
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {"BLOCK": block,
"DEFBLOCKS": self.defblock,
"METADATA": self.metadata}
else:
return None
def split_text(self, text):
"""Split input template text into directives and raw text chunks."""
tokens = []
line = 1
style = self.style[-1]
def make_splitter(delims):
return re.compile(r"(?s)(.*?)%s(.*?)%s" % delims)
splitter = make_splitter((style["START_TAG"], style["END_TAG"]))
while True:
match = splitter.match(text)
if not match:
break
text = text[match.end():]
pre, dir = match.group(1), match.group(2)
prelines = pre.count("\n")
dirlines = dir.count("\n")
postlines = 0
if dir.startswith("#"):
# comment out the entire directive except for any end chomp flag
match = re.search(CHOMP_FLAGS + "$", dir)
if match:
dir = match.group()
else:
dir = ""
else:
# PRE_CHOMP: process whitespace before tag
match = re.match(r"(%s)?\s*" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["PRE_CHOMP"])
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
# POST_CHOMP: process whitespace after tag
match = re.search(r"\s*(%s)?\s*$" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["POST_CHOMP"])
if match:
dir = dir[:match.start()]
text, postlines = POST_CHOMP[chomp](text, postlines)
if pre:
if style["INTERPOLATE"]:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(["TEXT", pre])
line += prelines
if dir:
# The TAGS directive is a compile-time switch.
match = re.match(r"(?i)TAGS\s+(.*)", dir)
if match:
tags = re.split(r"\s+", match.group(1))
if len(tags) > 1:
splitter = make_splitter(tuple(re.escape(x) for x in tags[:2]))
elif tags[0] in TAG_STYLE:
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write("Invalid TAGS style: %s" % tags[0])
else:
if dirlines > 0:
line_range = "%d-%d" % (line, line + dirlines)
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += dirlines + postlines
if text:
if style["INTERPOLATE"]:
tokens.append([text, line, "ITEXT"])
else:
tokens.extend(["TEXT", text])
return tokens
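# Rough illustration (assuming default tags, no chomping and INTERPOLATE
# off): split_text("Hello [% name %]!") returns something like
# ["TEXT", "Hello ", ["name", "1", ["IDENT", "name"]], "TEXT", "!"],
# i.e. raw text chunks plus [directive_text, line, token_list] entries.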
def _comment(self, token):
"""Tokenizes a comment."""
return ()
def _string(self, quote, token):
"""Tokenizes a string."""
if quote == '"':
if re.search(r"[$\\]", token):
# unescape " and \ but leave \$ escaped so that
# interpolate_text() doesn't incorrectly treat it
# as a variable reference
token = re.sub(r'\\([\\"])', r'\1', token)
token = re.sub(r'\\([^$nrt])', r'\1', token)
token = re.sub(r'\\([nrt])', lambda m: ESCAPE[m.group(1)], token)
return ['"', '"'] + self.interpolate_text(token) + ['"', '"']
else:
return "LITERAL", "scalar(%r)" % token
else:
# Remove escaped single quotes and backslashes:
token = re.sub(r"\\(.)", lambda m: m.group(m.group(1) in "'\\"), token)
return "LITERAL", "scalar(%r)" % token
def _number(self, token):
"""Tokenizes a number."""
return "NUMBER", "scalar(%s)" % token
def _filename(self, token):
"""Tokenizes a filename."""
return "FILENAME", token
def _identifier(self, token):
"""Tokenizes an identifier."""
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if toktype is not None:
return toktype, uctoken
else:
return "IDENT", token
def _word(self, token):
"""Tokenizes an unquoted word or symbol ."""
return self.lextable.get(token, "UNQUOTED"), token
def tokenise_directive(self, dirtext):
"""Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
"""
tokens = []
for match in GRAMMAR.finditer(dirtext):
for indices, method in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens
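# Rough illustration (assuming the default grammar's lextable):
# tokenise_directive("INCLUDE header") returns a flat [type, value, ...]
# list along the lines of ["INCLUDE", "INCLUDE", "IDENT", "header"], with
# reserved words mapping to their own token type and anything else to IDENT.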
def _parse(self, tokens, info):
"""Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details.
"""
self.grammar.install_factory(self.factory)
stack = [[0, None]] # DFA stack
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = info and info.name
self.inpython = 0
value = None
while True:
stateno = stack[-1][0]
state = self.states[stateno]
# see if any lookaheads exist for the current state
if "ACTIONS" in state:
# get next token and expand any directives (ie. token is a
# list) onto the front of the token list
while token is None and tokens:
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
text, self.line, token = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = token + [";", ";"]
token = None # force redo
elif token == "ITEXT":
if in_python:
# don't perform interpolation in PYTHON blocks
token = "TEXT"
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None # force redo
else:
# toggle string flag to indicate if we're crossing
# a string boundary
if token == '"':
in_string = not in_string
value = tokens and tokens.pop(0) or None
if token is None:
token = ""
# get the next state for the current lookahead token
lookup = state["ACTIONS"].get(token)
if lookup:
action = lookup
else:
action = state.get("DEFAULT")
else:
# no lookahead assertions
action = state.get("DEFAULT")
# ERROR: no ACTION
if action is None:
break
# shift (positive ACTION)
if action > 0:
stack.append([action, value])
token = value = None
else:
# reduce (negative ACTION)
lhs, len_, code = self.rules[-action]
# no action implies ACCEPTance
if not action:
status = ACCEPT
# use dummy sub if code ref doesn't exist
if not code:
code = lambda *arg: len(arg) >= 2 and arg[1] or None
if len_ > 0:
codevars = [x[1] for x in stack[-len_:]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
# reduce stack by len_
if len_ > 0:
stack[-len_:] = []
# ACCEPT
if status == ACCEPT:
return coderet
elif status == ABORT:
return None
elif status == ERROR:
break
stack.append([self.states[stack[-1][0]].get("GOTOS", {}).get(lhs),
coderet])
# ERROR
if value is None:
self._parse_error("unexpected end of input", info.name)
elif value == ";":
self._parse_error("unexpected end of directive", info.name, text)
else:
self._parse_error("unexpected token (%s)" %
util.unscalar_lex(value), info.name, text)
def _parse_error(self, msg, name, text=None):
"""Method used to handle errors encountered during the parse process
in the _parse() method.
"""
line = self.line or "unknown"
if text is not None:
msg += "\n [%% %s %%]" % text
raise TemplateException("parse", "%s line %s: %s" % (name, line, msg))
def define_block(self, name, block):
"""Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
"""
if self.defblock is None:
return None
self.defblock[name] = block
return None
def push_defblock(self):
self.defblock_stack.append(self.defblock)
self.defblock = {}
def pop_defblock(self):
if not self.defblock_stack:
return self.defblock
block = self.defblock
self.defblock = self.defblock_stack.pop(0)
return block
def add_metadata(self, setlist):
setlist = [util.unscalar_lex(x) for x in setlist]
if self.metadata is not None:
for key, value in util.chop(setlist, 2):
self.metadata[key] = value
return None
def interpolate_text(self, text, line=0):
"""Examines text looking for any variable references embedded
like $this or like ${ this }.
"""
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = match.group(3) or match.group(4)
dir = match.group(2)
# preceding text
if pre:
line += pre.count("\n")
tokens.extend(("TEXT", pre.replace("\\$", "$")))
# variable reference
if var:
line += dir.count("\n")
tokens.append([dir, line, self.tokenise_directive(var)])
# other '$' reference - treated as text
elif dir:
line += dir.count("\n")
tokens.extend(("TEXT", dir))
return tokens
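# Rough illustration: interpolate_text("Dear $name!") yields tokens along
# the lines of ("TEXT", "Dear "), ["$name", 0, tokenise_directive("name")],
# ("TEXT", "!") -- plain text chunks plus a directive entry for each
# embedded variable reference.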
|
def old_style(self):
"""Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
"""
if len(self.style) <= 1:
raise Error("only 1 parser style remaining")
self.style.pop()
return self.style[-1]
| 703 | 711 |
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import re
import sys
from template import util
from template.constants import *
from template.directive import Directive
from template.grammar import Grammar
from template.util import TemplateException
"""
template.parser - LALR(1) parser for compiling template documents
SYNOPSIS
import template.parser
parser = template.parser.Parser(config)
template = parser.parse(text)
DESCRIPTION
The template.parser module implements a LALR(1) parser and associated
methods for parsing template documents into Python code.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.parser.Parser object. A
dictionary may be supplied as a parameter to provide configuration
values. These may include:
* START_TAG, END_TAG
The START_TAG and END_TAG options are used to specify character
sequences or regular expressions that mark the start and end of a
template directive. The default values for START_TAG and END_TAG are
'[%' and '%]' respectively, giving us the familiar directive style:
[% example %]
Any Python regex characters can be used and therefore should be
escaped (or use the re.escape function) if they are intended to
represent literal characters.
parser = template.parser.Parser({
'START_TAG': re.escape('<+'),
'END_TAG': re.escape('+>'),
})
example:
<+ INCLUDE foobar +>
The TAGS directive can also be used to set the START_TAG and END_TAG values
on a per-template file basis.
[% TAGS <+ +> %]
* TAG_STYLE
The TAG_STYLE option can be used to set both START_TAG and END_TAG
according to pre-defined tag styles.
parser = template.parser.Parser({
'TAG_STYLE': 'star',
})
Available styles are:
template [% ... %] (default)
template1 [% ... %] or %% ... %% (TT version 1)
metatext %% ... %% (Text::MetaText)
star [* ... *] (TT alternate)
php <? ... ?> (PHP)
asp <% ... %> (ASP)
mason <% ... > (HTML::Mason)
html <!-- ... --> (HTML comments)
Any values specified for START_TAG and/or END_TAG will over-ride those
defined by a TAG_STYLE.
The TAGS directive may also be used to set a TAG_STYLE
[% TAGS html %]
<!-- INCLUDE header -->
* PRE_CHOMP, POST_CHOMP
Anything outside a directive tag is considered plain text and is
generally passed through unaltered (but see the INTERPOLATE option).
This includes all whitespace and newline characters surrounding
directive tags. Directives that don't generate any output will leave
gaps in the output document.
Example:
Foo
[% a = 10 %]
Bar
Output:
Foo
Bar
The PRE_CHOMP and POST_CHOMP options can help to clean up some of this
extraneous whitespace. Both are disabled by default.
parser = template.parser.Parser({
'PRE_CHOMP': 1,
'POST_CHOMP': 1,
})
With PRE_CHOMP set to 1, the newline and whitespace preceding a
directive at the start of a line will be deleted. This has the effect
of concatenating a line that starts with a directive onto the end of
the previous line.
Foo E<lt>----------.
|
,---(PRE_CHOMP)----'
|
`-- [% a = 10 %] --.
|
,---(POST_CHOMP)---'
|
`-E<gt> Bar
With POST_CHOMP set to 1, any whitespace after a directive up to and
including the newline will be deleted. This has the effect of joining
a line that ends with a directive onto the start of the next line.
If PRE_CHOMP or POST_CHOMP is set to 2, all whitespace including any
number of newlines will be removed and replaced with a single space.
This is useful for HTML, where (usually) a contiguous block of
whitespace is rendered the same as a single space.
With PRE_CHOMP or POST_CHOMP set to 3, all adjacent whitespace
(including newlines) will be removed entirely.
These values are defined as CHOMP_NONE, CHOMP_ONE, CHOMP_COLLAPSE and
CHOMP_GREEDY constants in the template.constants module. CHOMP_ALL
is also defined as an alias for CHOMP_ONE to provide backwards
compatibility with earlier versions of the Template Toolkit.
Additionally the chomp tag modifiers listed below may also be used for
the PRE_CHOMP and POST_CHOMP configuration.
tt = template.Template({
'PRE_CHOMP': '~',
'POST_CHOMP': '-',
})
PRE_CHOMP and POST_CHOMP can be activated for individual directives by
placing a '-' immediately at the start and/or end of the directive.
[% FOREACH user IN userlist %]
[%- user -%]
[% END %]
This has the same effect as CHOMP_ONE in removing all whitespace
before or after the directive up to and including the newline. The
template will be processed as if written:
[% FOREACH user IN userlist %][% user %][% END %]
To remove all whitespace including any number of newlines, use the '~'
character instead.
[% FOREACH user IN userlist %]
[%~ user ~%]
[% END %]
To collapse all whitespace to a single space, use the '=' character.
[% FOREACH user IN userlist %]
[%= user =%]
[% END %]
Here the template is processed as if written:
[% FOREACH user IN userlist %] [% user %] [% END %]
If you have PRE_CHOMP or POST_CHOMP set as configuration options then
you can use '+' to disable any chomping options (i.e. leave the
whitespace intact) on a per-directive basis.
[% FOREACH user = userlist %]
User: [% user +%]
[% END %]
With POST_CHOMP set to CHOMP_ONE, the above example would be parsed as
if written:
[% FOREACH user = userlist %]User: [% user %]
[% END %]
For reference, the PRE_CHOMP and POST_CHOMP configuration options may be set to any of the following:
Constant Value Tag Modifier
----------------------------------
CHOMP_NONE 0 +
CHOMP_ONE 1 -
CHOMP_COLLAPSE 2 =
CHOMP_GREEDY 3 ~
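As an illustrative sketch (added example, not part of the original
documentation), the constants and the tag modifiers above are
interchangeable and may be mixed in a single configuration:
from template.constants import CHOMP_COLLAPSE
parser = template.parser.Parser({
'PRE_CHOMP': CHOMP_COLLAPSE, # same effect as the '=' modifier
'POST_CHOMP': '-', # same effect as CHOMP_ONE
})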
* INTERPOLATE
The INTERPOLATE flag, when set to any true value will cause variable
references in plain text (i.e. not surrounded by START_TAG and
END_TAG) to be recognised and interpolated accordingly.
parser = template.parser.Parser({
'INTERPOLATE': 1,
})
Variables should be prefixed by a '$' to identify them. Curly braces
can be used in the familiar Perl/shell style to explicitly scope the
variable name where required.
# INTERPOLATE => 0
<a href="http://[% server %]/[% help %]">
<img src="[% images %]/help.gif"></a>
[% myorg.name %]
# INTERPOLATE => 1
<a href="http://$server/$help">
<img src="$images/help.gif"></a>
$myorg.name
# explicit scoping with { }
<img src="$images/${icon.next}.gif">
Note that a limitation in Perl's regex engine restricts the maximum
length of an interpolated template to around 32 kilobytes or possibly
less. Files that exceed this limit in size will typically cause Perl
to dump core with a segmentation fault. If you routinely process
templates of this size then you should disable INTERPOLATE or split
the templates in several smaller files or blocks which can then be
joined back together via PROCESS or INCLUDE.
It is unknown whether this limitation is shared by the Python regex
engine.
* ANYCASE
By default, directive keywords should be expressed in UPPER CASE. The
ANYCASE option can be set to allow directive keywords to be specified
in any case.
# ANYCASE => 0 (default)
[% INCLUDE foobar %] # OK
[% include foobar %] # ERROR
[% include = 10 %] # OK, 'include' is a variable
# ANYCASE => 1
[% INCLUDE foobar %] # OK
[% include foobar %] # OK
[% include = 10 %] # ERROR, 'include' is reserved word
One side-effect of enabling ANYCASE is that you cannot use a variable
of the same name as a reserved word, regardless of case. The reserved
words are currently:
GET CALL SET DEFAULT INSERT INCLUDE PROCESS WRAPPER
IF UNLESS ELSE ELSIF FOR FOREACH WHILE SWITCH CASE
USE PLUGIN FILTER MACRO PYTHON RAWPYTHON BLOCK META
TRY THROW CATCH FINAL NEXT LAST BREAK RETURN STOP
CLEAR TO STEP AND OR NOT MOD DIV END
The only lower case reserved words that cannot be used for variables,
regardless of the ANYCASE option, are the operators:
and or not mod div
* V1DOLLAR
In version 1 of the Template Toolkit, an optional leading '$' could be placed
on any template variable and would be silently ignored.
# VERSION 1
[% $foo %] === [% foo %]
[% $hash.$key %] === [% hash.key %]
To interpolate a variable value the '${' ... '}' construct was used.
Typically, one would do this to index into a hash array when the key
value was stored in a variable.
example:
vars = {
'users': {
'aba': { 'name': 'Alan Aardvark', ... },
'abw': { 'name': 'Andy Wardley', ... },
...
},
'uid': 'aba',
...
}
template.process('user/home.html', vars)
'user/home.html':
[% user = users.${uid} %] # users.aba
Name: [% user.name %] # Alan Aardvark
This was inconsistent with double quoted strings and also the
INTERPOLATE mode, where a leading '$' in text was enough to indicate a
variable for interpolation, and the additional curly braces were used
to delimit variable names where necessary. Note that this use is
consistent with UNIX and Perl conventions, among others.
# double quoted string interpolation
[% name = "$title ${user.name}" %]
# INTERPOLATE = 1
<img src="$images/help.gif"></a>
<img src="$images/${icon.next}.gif">
For version 2, these inconsistencies have been removed and the syntax
clarified. A leading '$' on a variable is now used exclusively to
indicate that the variable name should be interpolated
(e.g. substituted for its value) before being used. The earlier example
from version 1:
# VERSION 1
[% user = users.${uid} %]
Name: [% user.name %]
can now be simplified in version 2 as:
# VERSION 2
[% user = users.$uid %]
Name: [% user.name %]
The leading dollar is no longer ignored and has the same effect of
interpolation as '${' ... '}' in version 1. The curly braces may
still be used to explicitly scope the interpolated variable name
where necessary.
e.g.
[% user = users.${me.id} %]
Name: [% user.name %]
The rule applies for all variables, both within directives and in
plain text if processed with the INTERPOLATE option. This means that
you should no longer (if you ever did) add a leading '$' to a variable
inside a directive, unless you explicitly want it to be interpolated.
One obvious side-effect is that any version 1 templates with variables
using a leading '$' will no longer be processed as expected. Given
the following variable definitions,
[% foo = 'bar'
bar = 'baz'
%]
version 1 would interpret the following as:
# VERSION 1
[% $foo %] => [% GET foo %] => bar
whereas version 2 interprets it as:
# VERSION 2
[% $foo %] => [% GET $foo %] => [% GET bar %] => baz
In version 1, the '$' is ignored and the value for the variable 'foo'
is retrieved and printed. In version 2, the variable '$foo' is first
interpolated to give the variable name 'bar' whose value is then
retrieved and printed.
The use of the optional '$' has never been strongly recommended, but
to assist in backwards compatibility with any version 1 templates that
may rely on this "feature", the V1DOLLAR option can be set to 1
(default: 0) to revert the behaviour and have leading '$' characters
ignored.
parser = template.parser.Parser({
'V1DOLLAR': 1,
})
* GRAMMAR
The GRAMMAR configuration item can be used to specify an alternate
grammar for the parser. This allows a modified or entirely new
template language to be constructed and used by the Template Toolkit.
Source templates are compiled to Python code by the template.parser
module using the template.grammar module (by default) to define the
language structure and semantics. Compiled templates are thus
inherently "compatible" with each other and there is nothing to prevent
any number of different template languages being compiled and used within
the same Template Toolkit processing environment (other than the usual
time and memory constraints).
The template.grammar file is constructed from a YACC like grammar
(using Parse::YAPP) and a skeleton module template. These files are
provided, along with a small script to rebuild the grammar, in the
'parser' sub-directory of the distribution. You don't have to know or
worry about these unless you want to hack on the template language or
define your own variant. There is a README file in the same directory
which provides some small guidance but it is assumed that you know
what you're doing if you venture herein. If you grok LALR parsers,
then you should find it comfortably familiar.
By default, an instance of the default template.grammar.Grammar will
be created and used automatically if a GRAMMAR item isn't specified.
import myorg.template.grammar
parser = template.parser.Parser({
'GRAMMAR': myorg.template.grammar.Grammar(),
})
* DEBUG
The DEBUG option can be used to enable various debugging features of
the template.parser module.
from template.constants import *
tt = template.Template({
'DEBUG': DEBUG_PARSER | DEBUG_DIRS,
})
The DEBUG value can include any of the following. Multiple values
should be combined using the logical OR operator, '|'.
** DEBUG_PARSER
This flag causes the Parser to generate debugging messages that show
the Python code generated by parsing and compiling each template.
** DEBUG_DIRS
This option causes the Template Toolkit to generate comments
indicating the source file, line and original text of each directive
in the template. These comments are embedded in the template output
using the format defined in the DEBUG_FORMAT configuration item, or a
simple default format if unspecified.
For example, the following template fragment:
Hello World
would generate this output:
## input text line 1 : ##
Hello
## input text line 2 : World ##
World
parse(text)
The parse() method parses the text passed in the first parameter and
returns a dictionary of data defining the compiled representation of
the template text, suitable for passing to the
template.document.Document constructor.
Example:
data = parser.parse(text)
The data dictionary returned contains a BLOCK item containing the
compiled Python code for the template, a DEFBLOCKS item containing a
dictionary of sub-template BLOCKs defined within the template, and
a METADATA item containing a dictionary of metadata values defined in
META tags.
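As a rough sketch (added example; the exact BLOCK value depends on the
factory in use), a call and its result might look like:
data = parser.parse("[% META title = 'Hello' %]Hi [% name %]")
# data is roughly:
# { 'BLOCK': <compiled Python code>,
# 'DEFBLOCKS': {},
# 'METADATA': { 'title': 'Hello' } }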
"""
CONTINUE = 0
ACCEPT = 1
ERROR = 2
ABORT = 3
TAG_STYLE = {
"default": (r"\[%", r"%\]"),
"template1": (r"[[%]%", r"%[]%]"),
"metatext": (r"%%", r"%%"),
"html": (r"<!--", r"-->"),
"mason": (r"<%", r">"),
"asp": (r"<%", r"%>"),
"php": (r"<\?", r"\?>"),
"star": (r"\[\*", r"\*\]"),
}
TAG_STYLE["template"] = TAG_STYLE["tt2"] = TAG_STYLE["default"]
DEFAULT_STYLE = {
"START_TAG": TAG_STYLE["default"][0],
"END_TAG": TAG_STYLE["default"][1],
"ANYCASE": 0,
"INTERPOLATE": 0,
"PRE_CHOMP": 0,
"POST_CHOMP": 0,
"V1DOLLAR": 0,
"EVAL_PYTHON": 0,
}
ESCAPE = {"n": "\n", "r": "\r", "t": "\t"}
CHOMP_FLAGS = r"[-=~+]"
CHOMP_ALL = str(CHOMP_ALL)
CHOMP_COLLAPSE = str(CHOMP_COLLAPSE)
CHOMP_GREEDY = str(CHOMP_GREEDY)
CHOMP_NONE = str(CHOMP_NONE)
CHOMP_CONST = {
"-": CHOMP_ALL,
"=": CHOMP_COLLAPSE,
"~": CHOMP_GREEDY,
"+": CHOMP_NONE
}
PRE_CHOMP = {
CHOMP_ALL: lambda x: re.sub(r"(\n|^)[^\S\n]*\Z", "", x),
CHOMP_COLLAPSE: lambda x: re.sub(r"\s+\Z", " ", x),
CHOMP_GREEDY: lambda x: re.sub(r"\s+\Z", "", x),
CHOMP_NONE: lambda x: x,
}
def postchomp(regex, prefix):
regex = re.compile(regex)
def strip(text, postlines):
match = regex.match(text)
if match:
text = prefix + text[match.end():]
postlines += match.group().count("\n")
return text, postlines
return strip
POST_CHOMP = {
CHOMP_ALL: postchomp(r"[^\S\n]*\n", ""),
CHOMP_COLLAPSE: postchomp(r"\s+", " "),
CHOMP_GREEDY: postchomp(r"\s+", ""),
CHOMP_NONE: lambda x, y: (x, y),
}
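# Illustrative examples (added comments, not original code):
# PRE_CHOMP[CHOMP_ALL]("Foo\n ") -> "Foo" (trailing newline and spaces gone)
# POST_CHOMP[CHOMP_ALL](" \nBar", 0) -> ("Bar", 1) (whitespace up to and
# including the first newline removed, newline count incremented)
# POST_CHOMP[CHOMP_COLLAPSE]("\n\n Bar", 0) -> (" Bar", 2)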
def Chomp(x):
return re.sub(r"[-=~+]", lambda m: CHOMP_CONST[m.group()], str(x))
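# Added note: Chomp() maps tag modifiers to the string form of the CHOMP_*
# constants and leaves numeric settings untouched; assuming CHOMP_ALL is 1
# as documented above, Chomp("-") -> "1" and Chomp(2) -> "2".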
GRAMMAR = re.compile(r"""
# strip out any comments
(\#[^\n]*)
|
# a quoted string matches in $3
(["']) # $2 - opening quote, ' or "
( # $3 - quoted text buffer
(?: # repeat group (no backreference)
\\\\ # an escaped backslash
| # ...or...
\\\2 # an escaped quote \" or \' (match $1)
| # ...or...
. # any other character
| \n
)*? # non-greedy repeat
) # end of $3
\2 # match opening quote
|
# an unquoted number matches in $4
(-? \d+ (?: \. \d+ )?) # numbers
|
# filename matches in $5
( /? \w+ (?: (?: /|::? ) \w* )+ | /\w+ )
|
# an identifier matches in $6
(\w+)
|
# an unquoted word or symbol matches in $7
( [(){}\[\]:;,/\\] # misc parentheses and symbols
| -> # arrow operator (for future?)
| [-+*] # math operations
| \${? # dollar with optional left brace
| => # like "="
| [=!<>]?= | [!<>] # equality tests
| &&? | \|\|? # boolean ops
| \.\.? # n..n sequence
| \S+ # something unquoted
) # end of $7
""", re.VERBOSE)
QUOTED_STRING = re.compile(r"""
( (?: \\. | [^\$] ){1,3000} ) # escaped or non-'$' character [$1]
|
( \$ (?: # embedded variable [$2]
(?: \{ ([^\}]*) \} ) # ${ ... } [$3]
|
([\w\.]+) # $word [$4]
)
)
""", re.VERBOSE)
class Error(Exception):
"""A trivial local exception class."""
pass
class Parser:
"""This module implements a LALR(1) parser and assocated support
methods to parse template documents into the appropriate "compiled"
format.
"""
def __init__(self, param):
self.start_tag = param.get("START_TAG") or DEFAULT_STYLE["START_TAG"]
self.end_tag = param.get("END_TAG") or DEFAULT_STYLE["END_TAG"]
self.tag_style = param.get("TAG_STYLE", "default")
self.anycase = param.get("ANYCASE", False)
self.interpolate = param.get("INTERPOLATE", False)
self.pre_chomp = param.get("PRE_CHOMP", CHOMP_NONE)
self.post_chomp = param.get("POST_CHOMP", CHOMP_NONE)
self.v1dollar = param.get("V1DOLLAR", False)
self.eval_python = param.get("EVAL_PYTHON", False)
self.file_info = param.get("FILE_INFO", 1)
self.grammar = param.get("GRAMMAR", Grammar())
self.factory = param.get("FACTORY", Directive)
self.fileinfo = []
self.defblocks = []
self.defblock_stack = []
self.infor = 0
self.inwhile = 0
self.style = []
# Build a FACTORY object to include any NAMESPACE definitions,
# but only if FACTORY isn't already a (non-callable) object.
if isinstance(self.factory, collections.Callable):
self.factory = self.factory(param)
self.lextable = self.grammar.lextable
self.states = self.grammar.states
self.rules = self.grammar.rules
self.new_style(param)
self.tokenize = (
((1,), self._comment),
((2, 3), self._string),
((4,), self._number),
((5,), self._filename),
((6,), self._identifier),
((7,), self._word),
)
def new_style(self, config):
"""Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
"""
if self.style:
style = self.style[-1]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get("TAG_STYLE")
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if tags is None:
raise Error("Invalid tag style: %s" % tagstyle)
start, end = tags
config["START_TAG"] = config.get("START_TAG", start)
config["END_TAG"] = config.get("END_TAG", end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if value is not None:
style[key] = value
self.style.append(style)
return style
def old_style(self):
"""Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
"""
if len(self.style) <= 1:
raise Error("only 1 parser style remaining")
self.style.pop()
return self.style[-1]
def location(self):
"""Return Python comment indicating current parser file and line."""
if not self.file_info:
return "\n"
line = self.line
info = self.fileinfo[-1]
file = info and (info.path or info.name) or "(unknown template)"
line = re.sub(r"-.*", "", str(line)) # might be 'n-n'
return '#line %s "%s"\n' % (line, file)
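# Example (hypothetical values): with FILE_INFO enabled and the parser at
# line 12 of "header.tt", this returns '#line 12 "header.tt"\n'; a line
# range such as '12-14' is reduced to its first number.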
def parse(self, text, info=None):
"""Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
"""
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if tokens is None:
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {"BLOCK": block,
"DEFBLOCKS": self.defblock,
"METADATA": self.metadata}
else:
return None
def split_text(self, text):
"""Split input template text into directives and raw text chunks."""
tokens = []
line = 1
style = self.style[-1]
def make_splitter(delims):
return re.compile(r"(?s)(.*?)%s(.*?)%s" % delims)
splitter = make_splitter((style["START_TAG"], style["END_TAG"]))
while True:
match = splitter.match(text)
if not match:
break
text = text[match.end():]
pre, dir = match.group(1), match.group(2)
prelines = pre.count("\n")
dirlines = dir.count("\n")
postlines = 0
if dir.startswith("#"):
# comment out entire directive except for any end chomp flag
match = re.search(CHOMP_FLAGS + "$", dir)
if match:
dir = match.group()
else:
dir = ""
else:
# PRE_CHOMP: process whitespace before tag
match = re.match(r"(%s)?\s*" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["PRE_CHOMP"])
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
# POST_CHOMP: process whitespace after tag
match = re.search(r"\s*(%s)?\s*$" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["POST_CHOMP"])
if match:
dir = dir[:match.start()]
text, postlines = POST_CHOMP[chomp](text, postlines)
if pre:
if style["INTERPOLATE"]:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(["TEXT", pre])
line += prelines
if dir:
# The TAGS directive is a compile-time switch.
match = re.match(r"(?i)TAGS\s+(.*)", dir)
if match:
tags = re.split(r"\s+", match.group(1))
if len(tags) > 1:
splitter = make_splitter(tuple(re.escape(x) for x in tags[:2]))
elif tags[0] in TAG_STYLE:
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write("Invalid TAGS style: %s" % tags[0])
else:
if dirlines > 0:
line_range = "%d-%d" % (line, line + dirlines)
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += dirlines + postlines
if text:
if style["INTERPOLATE"]:
tokens.append([text, line, "ITEXT"])
else:
tokens.extend(["TEXT", text])
return tokens
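# Illustrative sketch (added comment): with the default style and
# INTERPOLATE off, split_text("Hi [% name %]!") returns roughly
# ["TEXT", "Hi ", ["name", "1", <tokenise_directive("name")>], "TEXT", "!"]
# i.e. raw text as TEXT pairs and each directive as a nested
# [text, line_range, tokens] entry.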
def _comment(self, token):
"""Tokenizes a comment."""
return ()
def _string(self, quote, token):
"""Tokenizes a string."""
if quote == '"':
if re.search(r"[$\\]", token):
# unescape " and \ but leave \$ escaped so that
# interpolate_text() doesn't incorrectly treat it
# as a variable reference
token = re.sub(r'\\([\\"])', r'\1', token)
token = re.sub(r'\\([^$nrt])', r'\1', token)
token = re.sub(r'\\([nrt])', lambda m: ESCAPE[m.group(1)], token)
return ['"', '"'] + self.interpolate_text(token) + ['"', '"']
else:
return "LITERAL", "scalar(%r)" % token
else:
# Remove escaped single quotes and backslashes:
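# (m.group(True) is group 1 and m.group(False) is group 0, so only
# \' and \\ sequences are collapsed; any other \x escape is left intact)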
token = re.sub(r"\\(.)", lambda m: m.group(m.group(1) in "'\\"), token)
return "LITERAL", "scalar(%r)" % token
def _number(self, token):
"""Tokenizes a number."""
return "NUMBER", "scalar(%s)" % token
def _filename(self, token):
"""Tokenizes a filename."""
return "FILENAME", token
def _identifier(self, token):
"""Tokenizes an identifier."""
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if toktype is not None:
return toktype, uctoken
else:
return "IDENT", token
def _word(self, token):
"""Tokenizes an unquoted word or symbol ."""
return self.lextable.get(token, "UNQUOTED"), token
def tokenise_directive(self, dirtext):
"""Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
"""
tokens = []
for match in GRAMMAR.finditer(dirtext):
for indices, method in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens
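# Illustrative sketch (token types depend on the grammar's lextable):
# tokenise_directive("foo = 'bar'") returns a flat list of type/value
# pairs, roughly ["IDENT", "foo", <type of '='>, "=", "LITERAL",
# "scalar('bar')"].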
def _parse(self, tokens, info):
"""Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details.
"""
self.grammar.install_factory(self.factory)
stack = [[0, None]] # DFA stack
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = info and info.name
self.inpython = 0
value = None
while True:
stateno = stack[-1][0]
state = self.states[stateno]
# see if any lookaheads exist for the current state
if "ACTIONS" in state:
# get next token and expand any directives (ie. token is a
# list) onto the front of the token list
while token is None and tokens:
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
text, self.line, token = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = token + [";", ";"]
token = None # force redo
elif token == "ITEXT":
if in_python:
# don't perform interpolation in PYTHON blocks
token = "TEXT"
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None # force redo
else:
# toggle string flag to indicate if we're crossing
# a string boundary
if token == '"':
in_string = not in_string
value = tokens and tokens.pop(0) or None
if token is None:
token = ""
# get the next state for the current lookahead token
lookup = state["ACTIONS"].get(token)
if lookup:
action = lookup
else:
action = state.get("DEFAULT")
else:
# no lookahead assertions
action = state.get("DEFAULT")
# ERROR: no ACTION
if action is None:
break
# shift (positive ACTION)
if action > 0:
stack.append([action, value])
token = value = None
else:
# reduce (negative ACTION)
lhs, len_, code = self.rules[-action]
# no action implies ACCEPTance
if not action:
status = ACCEPT
# use dummy sub if code ref doesn't exist
if not code:
code = lambda *arg: len(arg) >= 2 and arg[1] or None
if len_ > 0:
codevars = [x[1] for x in stack[-len_:]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
# reduce stack by len_
if len_ > 0:
stack[-len_:] = []
# ACCEPT
if status == ACCEPT:
return coderet
elif status == ABORT:
return None
elif status == ERROR:
break
stack.append([self.states[stack[-1][0]].get("GOTOS", {}).get(lhs),
coderet])
# ERROR
if value is None:
self._parse_error("unexpected end of input", info.name)
elif value == ";":
self._parse_error("unexpected end of directive", info.name, text)
else:
self._parse_error("unexpected token (%s)" %
util.unscalar_lex(value), info.name, text)
def _parse_error(self, msg, name, text=None):
"""Method used to handle errors encountered during the parse process
in the _parse() method.
"""
line = self.line or "unknown"
if text is not None:
msg += "\n [%% %s %%]" % text
raise TemplateException("parse", "%s line %s: %s" % (name, line, msg))
def define_block(self, name, block):
"""Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
"""
if self.defblock is None:
return None
self.defblock[name] = block
return None
def push_defblock(self):
self.defblock_stack.append(self.defblock)
self.defblock = {}
def pop_defblock(self):
if not self.defblock_stack:
return self.defblock
block = self.defblock
self.defblock = self.defblock_stack.pop(0)
return block
def add_metadata(self, setlist):
setlist = [util.unscalar_lex(x) for x in setlist]
if self.metadata is not None:
for key, value in util.chop(setlist, 2):
self.metadata[key] = value
return None
def interpolate_text(self, text, line=0):
"""Examines text looking for any variable references embedded
like $this or like ${ this }.
"""
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = match.group(3) or match.group(4)
dir = match.group(2)
# preceding text
if pre:
line += pre.count("\n")
tokens.extend(("TEXT", pre.replace("\\$", "$")))
# variable reference
if var:
line += dir.count("\n")
tokens.append([dir, line, self.tokenise_directive(var)])
# other '$' reference - treated as text
elif dir:
line += dir.count("\n")
tokens.extend(("TEXT", dir))
return tokens
|
define_block
|
Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
|
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import re
import sys
from template import util
from template.constants import *
from template.directive import Directive
from template.grammar import Grammar
from template.util import TemplateException
"""
template.parser - LALR(1) parser for compiling template documents
SYNOPSIS
import template.parser
parser = template.parser.Parser(config)
template = parser.parse(text)
DESCRIPTION
The template.parser module implements a LALR(1) parser and associated
methods for parsing template documents into Python code.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.parser.Parser object. A
dictionary may be supplied as a parameter to provide configuration
values. These may include:
* START_TAG, END_TAG
The START_TAG and END_TAG options are used to specify character
sequences or regular expressions that mark the start and end of a
template directive. The default values for START_TAG and END_TAG are
'[%' and '%]' respectively, giving us the familiar directive style:
[% example %]
Any Python regex characters can be used and therefore should be
escaped (or use the re.escape function) if they are intended to
represent literal characters.
parser = template.parser.Parser({
'START_TAG': re.escape('<+'),
'END_TAG': re.escape('+>'),
})
example:
<+ INCLUDE foobar +>
The TAGS directive can also be used to set the START_TAG and END_TAG values
on a per-template file basis.
[% TAGS <+ +> %]
* TAG_STYLE
The TAG_STYLE option can be used to set both START_TAG and END_TAG
according to pre-defined tag styles.
parser = template.parser.Parser({
'TAG_STYLE': 'star',
})
Available styles are:
template [% ... %] (default)
template1 [% ... %] or %% ... %% (TT version 1)
metatext %% ... %% (Text::MetaText)
star [* ... *] (TT alternate)
php <? ... ?> (PHP)
asp <% ... %> (ASP)
mason <% ... > (HTML::Mason)
html <!-- ... --> (HTML comments)
Any values specified for START_TAG and/or END_TAG will over-ride those
defined by a TAG_STYLE.
The TAGS directive may also be used to set a TAG_STYLE
[% TAGS html %]
<!-- INCLUDE header -->
* PRE_CHOMP, POST_CHOMP
Anything outside a directive tag is considered plain text and is
generally passed through unaltered (but see the INTERPOLATE option).
This includes all whitespace and newline characters surrounding
directive tags. Directives that don't generate any output will leave
gaps in the output document.
Example:
Foo
[% a = 10 %]
Bar
Output:
Foo
Bar
The PRE_CHOMP and POST_CHOMP options can help to clean up some of this
extraneous whitespace. Both are disabled by default.
parser = template.parser.Parser({
'PRE_CHOMP': 1,
'POST_CHOMP': 1,
})
With PRE_CHOMP set to 1, the newline and whitespace preceding a
directive at the start of a line will be deleted. This has the effect
of concatenating a line that starts with a directive onto the end of
the previous line.
Foo E<lt>----------.
|
,---(PRE_CHOMP)----'
|
`-- [% a = 10 %] --.
|
,---(POST_CHOMP)---'
|
`-E<gt> Bar
With POST_CHOMP set to 1, any whitespace after a directive up to and
including the newline will be deleted. This has the effect of joining
a line that ends with a directive onto the start of the next line.
If PRE_CHOMP or POST_CHOMP is set to 2, all whitespace including any
number of newlines will be removed and replaced with a single space.
This is useful for HTML, where (usually) a contiguous block of
whitespace is rendered the same as a single space.
With PRE_CHOMP or POST_CHOMP set to 3, all adjacent whitespace
(including newlines) will be removed entirely.
These values are defined as CHOMP_NONE, CHOMP_ONE, CHOMP_COLLAPSE and
CHOMP_GREEDY constants in the template.constants module. CHOMP_ALL
is also defined as an alias for CHOMP_ONE to provide backwards
compatibility with earlier versions of the Template Toolkit.
Additionally the chomp tag modifiers listed below may also be used for
the PRE_CHOMP and POST_CHOMP configuration.
tt = template.Template({
'PRE_CHOMP': '~',
'POST_CHOMP': '-',
})
PRE_CHOMP and POST_CHOMP can be activated for individual directives by
placing a '-' immediately at the start and/or end of the directive.
[% FOREACH user IN userlist %]
[%- user -%]
[% END %]
This has the same effect as CHOMP_ONE in removing all whitespace
before or after the directive up to and including the newline. The
template will be processed as if written:
[% FOREACH user IN userlist %][% user %][% END %]
To remove all whitespace including any number of newlines, use the '~'
character instead.
[% FOREACH user IN userlist %]
[%~ user ~%]
[% END %]
To collapse all whitespace to a single space, use the '=' character.
[% FOREACH user IN userlist %]
[%= user =%]
[% END %]
Here the template is processed as if written:
[% FOREACH user IN userlist %] [% user %] [% END %]
If you have PRE_CHOMP or POST_CHOMP set as configuration options then
you can use '+' to disable any chomping options (i.e. leave the
whitespace intact) on a per-directive basis.
[% FOREACH user = userlist %]
User: [% user +%]
[% END %]
With POST_CHOMP set to CHOMP_ONE, the above example would be parsed as
if written:
[% FOREACH user = userlist %]User: [% user %]
[% END %]
For reference, the PRE_CHOMP and POST_CHOMP configuration options may be set to any of the following:
Constant Value Tag Modifier
----------------------------------
CHOMP_NONE 0 +
CHOMP_ONE 1 -
CHOMP_COLLAPSE 2 =
CHOMP_GREEDY 3 ~
* INTERPOLATE
The INTERPOLATE flag, when set to any true value will cause variable
references in plain text (i.e. not surrounded by START_TAG and
END_TAG) to be recognised and interpolated accordingly.
parser = template.parser.Parser({
'INTERPOLATE': 1,
})
Variables should be prefixed by a '$' to identify them. Curly braces
can be used in the familiar Perl/shell style to explicitly scope the
variable name where required.
# INTERPOLATE => 0
<a href="http://[% server %]/[% help %]">
<img src="[% images %]/help.gif"></a>
[% myorg.name %]
# INTERPOLATE => 1
<a href="http://$server/$help">
<img src="$images/help.gif"></a>
$myorg.name
# explicit scoping with { }
<img src="$images/${icon.next}.gif">
Note that a limitation in Perl's regex engine restricts the maximum
length of an interpolated template to around 32 kilobytes or possibly
less. Files that exceed this limit in size will typically cause Perl
to dump core with a segmentation fault. If you routinely process
templates of this size then you should disable INTERPOLATE or split
the templates in several smaller files or blocks which can then be
joined back together via PROCESS or INCLUDE.
It is unknown whether this limitation is shared by the Python regex
engine.
* ANYCASE
By default, directive keywords should be expressed in UPPER CASE. The
ANYCASE option can be set to allow directive keywords to be specified
in any case.
# ANYCASE => 0 (default)
[% INCLUDE foobar %] # OK
[% include foobar %] # ERROR
[% include = 10 %] # OK, 'include' is a variable
# ANYCASE => 1
[% INCLUDE foobar %] # OK
[% include foobar %] # OK
[% include = 10 %] # ERROR, 'include' is reserved word
One side-effect of enabling ANYCASE is that you cannot use a variable
of the same name as a reserved word, regardless of case. The reserved
words are currently:
GET CALL SET DEFAULT INSERT INCLUDE PROCESS WRAPPER
IF UNLESS ELSE ELSIF FOR FOREACH WHILE SWITCH CASE
USE PLUGIN FILTER MACRO PYTHON RAWPYTHON BLOCK META
TRY THROW CATCH FINAL NEXT LAST BREAK RETURN STOP
CLEAR TO STEP AND OR NOT MOD DIV END
The only lower case reserved words that cannot be used for variables,
regardless of the ANYCASE option, are the operators:
and or not mod div
* V1DOLLAR
In version 1 of the Template Toolkit, an optional leading '$' could be placed
on any template variable and would be silently ignored.
# VERSION 1
[% $foo %] === [% foo %]
[% $hash.$key %] === [% hash.key %]
To interpolate a variable value the '${' ... '}' construct was used.
Typically, one would do this to index into a hash array when the key
value was stored in a variable.
example:
vars = {
'users': {
'aba': { 'name': 'Alan Aardvark', ... },
'abw': { 'name': 'Andy Wardley', ... },
...
},
'uid': 'aba',
...
}
template.process('user/home.html', vars)
'user/home.html':
[% user = users.${uid} %] # users.aba
Name: [% user.name %] # Alan Aardvark
This was inconsistent with double quoted strings and also the
INTERPOLATE mode, where a leading '$' in text was enough to indicate a
variable for interpolation, and the additional curly braces were used
to delimit variable names where necessary. Note that this use is
consistent with UNIX and Perl conventions, among others.
# double quoted string interpolation
[% name = "$title ${user.name}" %]
# INTERPOLATE = 1
<img src="$images/help.gif"></a>
<img src="$images/${icon.next}.gif">
For version 2, these inconsistencies have been removed and the syntax
clarified. A leading '$' on a variable is now used exclusively to
indicate that the variable name should be interpolated
(e.g. substituted for its value) before being used. The earlier example
from version 1:
# VERSION 1
[% user = users.${uid} %]
Name: [% user.name %]
can now be simplified in version 2 as:
# VERSION 2
[% user = users.$uid %]
Name: [% user.name %]
The leading dollar is no longer ignored and has the same effect of
interpolation as '${' ... '}' in version 1. The curly braces may
still be used to explicitly scope the interpolated variable name
where necessary.
e.g.
[% user = users.${me.id} %]
Name: [% user.name %]
The rule applies for all variables, both within directives and in
plain text if processed with the INTERPOLATE option. This means that
you should no longer (if you ever did) add a leading '$' to a variable
inside a directive, unless you explicitly want it to be interpolated.
One obvious side-effect is that any version 1 templates with variables
using a leading '$' will no longer be processed as expected. Given
the following variable definitions,
[% foo = 'bar'
bar = 'baz'
%]
version 1 would interpret the following as:
# VERSION 1
[% $foo %] => [% GET foo %] => bar
whereas version 2 interprets it as:
# VERSION 2
[% $foo %] => [% GET $foo %] => [% GET bar %] => baz
In version 1, the '$' is ignored and the value for the variable 'foo'
is retrieved and printed. In version 2, the variable '$foo' is first
interpolated to give the variable name 'bar' whose value is then
retrieved and printed.
The use of the optional '$' has never been strongly recommended, but
to assist in backwards compatibility with any version 1 templates that
may rely on this "feature", the V1DOLLAR option can be set to 1
(default: 0) to revert the behaviour and have leading '$' characters
ignored.
parser = template.parser.Parser({
'V1DOLLAR': 1,
})
* GRAMMAR
The GRAMMAR configuration item can be used to specify an alternate
grammar for the parser. This allows a modified or entirely new
template language to be constructed and used by the Template Toolkit.
Source templates are compiled to Python code by the template.parser
module using the template.grammar module (by default) to define the
language structure and semantics. Compiled templates are thus
inherently "compatible" with each other and there is nothing to prevent
any number of different template languages being compiled and used within
the same Template Toolkit processing environment (other than the usual
time and memory constraints).
The template.grammar file is constructed from a YACC like grammar
(using Parse::YAPP) and a skeleton module template. These files are
provided, along with a small script to rebuild the grammar, in the
'parser' sub-directory of the distribution. You don't have to know or
worry about these unless you want to hack on the template language or
define your own variant. There is a README file in the same directory
which provides some small guidance but it is assumed that you know
what you're doing if you venture herein. If you grok LALR parsers,
then you should find it comfortably familiar.
By default, an instance of the default template.grammar.Grammar will
be created and used automatically if a GRAMMAR item isn't specified.
import myorg.template.grammar
parser = template.parser.Parser({
'GRAMMAR': myorg.template.grammar.Grammar(),
})
* DEBUG
The DEBUG option can be used to enable various debugging features of
the template.parser module.
from template.constants import *
tt = template.Template({
'DEBUG': DEBUG_PARSER | DEBUG_DIRS,
})
The DEBUG value can include any of the following. Multiple values
should be combined using the logical OR operator, '|'.
** DEBUG_PARSER
This flag causes the Parser to generate debugging messages that show
the Python code generated by parsing and compiling each template.
** DEBUG_DIRS
This option causes the Template Toolkit to generate comments
indicating the source file, line and original text of each directive
in the template. These comments are embedded in the template output
using the format defined in the DEBUG_FORMAT configuration item, or a
simple default format if unspecified.
For example, the following template fragment:
Hello World
would generate this output:
## input text line 1 : ##
Hello
## input text line 2 : World ##
World
parse(text)
The parse() method parses the text passed in the first parameter and
returns a dictionary of data defining the compiled representation of
the template text, suitable for passing to the
template.document.Document constructor.
Example:
data = parser.parse(text)
The data dictionary returned contains a BLOCK item containing the
compiled Python code for the template, a DEFBLOCKS item containing a
dictionary of sub-template BLOCKs defined within the template, and
a METADATA item containing a dictionary of metadata values defined in
META tags.
"""
CONTINUE = 0
ACCEPT = 1
ERROR = 2
ABORT = 3
TAG_STYLE = {
"default": (r"\[%", r"%\]"),
"template1": (r"[[%]%", r"%[]%]"),
"metatext": (r"%%", r"%%"),
"html": (r"<!--", r"-->"),
"mason": (r"<%", r">"),
"asp": (r"<%", r"%>"),
"php": (r"<\?", r"\?>"),
"star": (r"\[\*", r"\*\]"),
}
TAG_STYLE["template"] = TAG_STYLE["tt2"] = TAG_STYLE["default"]
DEFAULT_STYLE = {
"START_TAG": TAG_STYLE["default"][0],
"END_TAG": TAG_STYLE["default"][1],
"ANYCASE": 0,
"INTERPOLATE": 0,
"PRE_CHOMP": 0,
"POST_CHOMP": 0,
"V1DOLLAR": 0,
"EVAL_PYTHON": 0,
}
ESCAPE = {"n": "\n", "r": "\r", "t": "\t"}
CHOMP_FLAGS = r"[-=~+]"
CHOMP_ALL = str(CHOMP_ALL)
CHOMP_COLLAPSE = str(CHOMP_COLLAPSE)
CHOMP_GREEDY = str(CHOMP_GREEDY)
CHOMP_NONE = str(CHOMP_NONE)
CHOMP_CONST = {
"-": CHOMP_ALL,
"=": CHOMP_COLLAPSE,
"~": CHOMP_GREEDY,
"+": CHOMP_NONE
}
PRE_CHOMP = {
CHOMP_ALL: lambda x: re.sub(r"(\n|^)[^\S\n]*\Z", "", x),
CHOMP_COLLAPSE: lambda x: re.sub(r"\s+\Z", " ", x),
CHOMP_GREEDY: lambda x: re.sub(r"\s+\Z", "", x),
CHOMP_NONE: lambda x: x,
}
def postchomp(regex, prefix):
regex = re.compile(regex)
def strip(text, postlines):
match = regex.match(text)
if match:
text = prefix + text[match.end():]
postlines += match.group().count("\n")
return text, postlines
return strip
POST_CHOMP = {
CHOMP_ALL: postchomp(r"[^\S\n]*\n", ""),
CHOMP_COLLAPSE: postchomp(r"\s+", " "),
CHOMP_GREEDY: postchomp(r"\s+", ""),
CHOMP_NONE: lambda x, y: (x, y),
}
def Chomp(x):
return re.sub(r"[-=~+]", lambda m: CHOMP_CONST[m.group()], str(x))
GRAMMAR = re.compile(r"""
# strip out any comments
(\#[^\n]*)
|
# a quoted string matches in $3
(["']) # $2 - opening quote, ' or "
( # $3 - quoted text buffer
(?: # repeat group (no backreference)
\\\\ # an escaped backslash
| # ...or...
\\\2 # an escaped quote \" or \' (match $1)
| # ...or...
. # any other character
| \n
)*? # non-greedy repeat
) # end of $3
\2 # match opening quote
|
# an unquoted number matches in $4
(-? \d+ (?: \. \d+ )?) # numbers
|
# filename matches in $5
( /? \w+ (?: (?: /|::? ) \w* )+ | /\w+ )
|
# an identifier matches in $6
(\w+)
|
# an unquoted word or symbol matches in $7
( [(){}\[\]:;,/\\] # misc parentheses and symbols
| -> # arrow operator (for future?)
| [-+*] # math operations
| \${? # dollar with optional left brace
| => # like "="
| [=!<>]?= | [!<>] # equality tests
| &&? | \|\|? # boolean ops
| \.\.? # n..n sequence
| \S+ # something unquoted
) # end of $7
""", re.VERBOSE)
QUOTED_STRING = re.compile(r"""
( (?: \\. | [^\$] ){1,3000} ) # escaped or non-'$' character [$1]
|
( \$ (?: # embedded variable [$2]
(?: \{ ([^\}]*) \} ) # ${ ... } [$3]
|
([\w\.]+) # $word [$4]
)
)
""", re.VERBOSE)
class Error(Exception):
"""A trivial local exception class."""
pass
class Parser:
"""This module implements a LALR(1) parser and assocated support
methods to parse template documents into the appropriate "compiled"
format.
"""
def __init__(self, param):
self.start_tag = param.get("START_TAG") or DEFAULT_STYLE["START_TAG"]
self.end_tag = param.get("END_TAG") or DEFAULT_STYLE["END_TAG"]
self.tag_style = param.get("TAG_STYLE", "default")
self.anycase = param.get("ANYCASE", False)
self.interpolate = param.get("INTERPOLATE", False)
self.pre_chomp = param.get("PRE_CHOMP", CHOMP_NONE)
self.post_chomp = param.get("POST_CHOMP", CHOMP_NONE)
self.v1dollar = param.get("V1DOLLAR", False)
self.eval_python = param.get("EVAL_PYTHON", False)
self.file_info = param.get("FILE_INFO", 1)
self.grammar = param.get("GRAMMAR", Grammar())
self.factory = param.get("FACTORY", Directive)
self.fileinfo = []
self.defblocks = []
self.defblock_stack = []
self.infor = 0
self.inwhile = 0
self.style = []
# Build a FACTORY object to include any NAMESPACE definitions,
# but only if FACTORY isn't already a (non-callable) object.
if isinstance(self.factory, collections.Callable):
self.factory = self.factory(param)
self.lextable = self.grammar.lextable
self.states = self.grammar.states
self.rules = self.grammar.rules
self.new_style(param)
self.tokenize = (
((1,), self._comment),
((2, 3), self._string),
((4,), self._number),
((5,), self._filename),
((6,), self._identifier),
((7,), self._word),
)
def new_style(self, config):
"""Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
"""
if self.style:
style = self.style[-1]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get("TAG_STYLE")
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if tags is None:
raise Error("Invalid tag style: %s" % tagstyle)
start, end = tags
config["START_TAG"] = config.get("START_TAG", start)
config["END_TAG"] = config.get("END_TAG", end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if value is not None:
style[key] = value
self.style.append(style)
return style
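# Usage sketch (added comment): styles stack, so a temporary tag style can
# be installed and reverted later:
# parser.new_style({'TAG_STYLE': 'star'}) # now splits on [* ... *]
# ... parse some text ...
# parser.old_style() # back to the previous style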
def old_style(self):
"""Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
"""
if len(self.style) <= 1:
raise Error("only 1 parser style remaining")
self.style.pop()
return self.style[-1]
def location(self):
"""Return Python comment indicating current parser file and line."""
if not self.file_info:
return "\n"
line = self.line
info = self.fileinfo[-1]
file = info and (info.path or info.name) or "(unknown template)"
line = re.sub(r"-.*", "", str(line)) # might be 'n-n'
return '#line %s "%s"\n' % (line, file)
def parse(self, text, info=None):
"""Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
"""
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if tokens is None:
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {"BLOCK": block,
"DEFBLOCKS": self.defblock,
"METADATA": self.metadata}
else:
return None
def split_text(self, text):
"""Split input template text into directives and raw text chunks."""
tokens = []
line = 1
style = self.style[-1]
def make_splitter(delims):
return re.compile(r"(?s)(.*?)%s(.*?)%s" % delims)
splitter = make_splitter((style["START_TAG"], style["END_TAG"]))
while True:
match = splitter.match(text)
if not match:
break
text = text[match.end():]
pre, dir = match.group(1), match.group(2)
prelines = pre.count("\n")
dirlines = dir.count("\n")
postlines = 0
if dir.startswith("#"):
# comment out entire directive except for any end chomp flag
match = re.search(CHOMP_FLAGS + "$", dir)
if match:
dir = match.group()
else:
dir = ""
else:
# PRE_CHOMP: process whitespace before tag
match = re.match(r"(%s)?\s*" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["PRE_CHOMP"])
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
# POST_CHOMP: process whitespace after tag
match = re.search(r"\s*(%s)?\s*$" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["POST_CHOMP"])
if match:
dir = dir[:match.start()]
text, postlines = POST_CHOMP[chomp](text, postlines)
if pre:
if style["INTERPOLATE"]:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(["TEXT", pre])
line += prelines
if dir:
# The TAGS directive is a compile-time switch.
match = re.match(r"(?i)TAGS\s+(.*)", dir)
if match:
tags = re.split(r"\s+", match.group(1))
if len(tags) > 1:
splitter = make_splitter(tuple(re.escape(x) for x in tags[:2]))
elif tags[0] in TAG_STYLE:
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write("Invalid TAGS style: %s" % tags[0])
else:
if dirlines > 0:
line_range = "%d-%d" % (line, line + dirlines)
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += dirlines + postlines
if text:
if style["INTERPOLATE"]:
tokens.append([text, line, "ITEXT"])
else:
tokens.extend(["TEXT", text])
return tokens
def _comment(self, token):
"""Tokenizes a comment."""
return ()
def _string(self, quote, token):
"""Tokenizes a string."""
if quote == '"':
if re.search(r"[$\\]", token):
# unescape " and \ but leave \$ escaped so that
# interpolate_text() doesn't incorrectly treat it
# as a variable reference
token = re.sub(r'\\([\\"])', r'\1', token)
token = re.sub(r'\\([^$nrt])', r'\1', token)
token = re.sub(r'\\([nrt])', lambda m: ESCAPE[m.group(1)], token)
return ['"', '"'] + self.interpolate_text(token) + ['"', '"']
else:
return "LITERAL", "scalar(%r)" % token
else:
# Remove escaped single quotes and backslashes:
token = re.sub(r"\\(.)", lambda m: m.group(m.group(1) in "'\\"), token)
return "LITERAL", "scalar(%r)" % token
def _number(self, token):
"""Tokenizes a number."""
return "NUMBER", "scalar(%s)" % token
def _filename(self, token):
"""Tokenizes a filename."""
return "FILENAME", token
def _identifier(self, token):
"""Tokenizes an identifier."""
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if toktype is not None:
return toktype, uctoken
else:
return "IDENT", token
def _word(self, token):
"""Tokenizes an unquoted word or symbol ."""
return self.lextable.get(token, "UNQUOTED"), token
def tokenise_directive(self, dirtext):
"""Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
"""
tokens = []
for match in GRAMMAR.finditer(dirtext):
for indices, method in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens
def _parse(self, tokens, info):
"""Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details.
"""
self.grammar.install_factory(self.factory)
stack = [[0, None]] # DFA stack
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = info and info.name
self.inpython = 0
value = None
while True:
stateno = stack[-1][0]
state = self.states[stateno]
# see if any lookaheads exist for the current state
if "ACTIONS" in state:
# get next token and expand any directives (ie. token is a
# list) onto the front of the token list
while token is None and tokens:
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
text, self.line, token = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = token + [";", ";"]
token = None # force redo
elif token == "ITEXT":
if in_python:
# don't perform interpolation in PYTHON blocks
token = "TEXT"
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None # force redo
else:
# toggle string flag to indicate if we're crossing
# a string boundary
if token == '"':
in_string = not in_string
value = tokens and tokens.pop(0) or None
if token is None:
token = ""
# get the next state for the current lookahead token
lookup = state["ACTIONS"].get(token)
if lookup:
action = lookup
else:
action = state.get("DEFAULT")
else:
# no lookahead assertions
action = state.get("DEFAULT")
# ERROR: no ACTION
if action is None:
break
# shift (positive ACTION)
if action > 0:
stack.append([action, value])
token = value = None
else:
# reduce (negative ACTION)
lhs, len_, code = self.rules[-action]
# no action implies ACCEPTance
if not action:
status = ACCEPT
# use dummy sub if code ref doesn't exist
if not code:
code = lambda *arg: len(arg) >= 2 and arg[1] or None
if len_ > 0:
codevars = [x[1] for x in stack[-len_:]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
# reduce stack by len_
if len_ > 0:
stack[-len_:] = []
# ACCEPT
if status == ACCEPT:
return coderet
elif status == ABORT:
return None
elif status == ERROR:
break
stack.append([self.states[stack[-1][0]].get("GOTOS", {}).get(lhs),
coderet])
# ERROR
if value is None:
self._parse_error("unexpected end of input", info.name)
elif value == ";":
self._parse_error("unexpected end of directive", info.name, text)
else:
self._parse_error("unexpected token (%s)" %
util.unscalar_lex(value), info.name, text)
def _parse_error(self, msg, name, text=None):
"""Method used to handle errors encountered during the parse process
in the _parse() method.
"""
line = self.line or "unknown"
if text is not None:
msg += "\n [%% %s %%]" % text
raise TemplateException("parse", "%s line %s: %s" % (name, line, msg))
# MASKED: define_block function (lines 1011-1025)
def push_defblock(self):
self.defblock_stack.append(self.defblock)
self.defblock = {}
def pop_defblock(self):
if not self.defblock_stack:
return self.defblock
block = self.defblock
self.defblock = self.defblock_stack.pop(0)
return block
def add_metadata(self, setlist):
setlist = [util.unscalar_lex(x) for x in setlist]
if self.metadata is not None:
for key, value in util.chop(setlist, 2):
self.metadata[key] = value
return None
def interpolate_text(self, text, line=0):
"""Examines text looking for any variable references embedded
like $this or like ${ this }.
"""
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = match.group(3) or match.group(4)
dir = match.group(2)
# preceding text
if pre:
line += pre.count("\n")
tokens.extend(("TEXT", pre.replace("\\$", "$")))
# variable reference
if var:
line += dir.count("\n")
tokens.append([dir, line, self.tokenise_directive(var)])
# other '$' reference - treated as text
elif dir:
line += dir.count("\n")
tokens.extend(("TEXT", dir))
return tokens
|
def define_block(self, name, block):
"""Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
"""
if self.defblock is None:
return None
self.defblock[name] = block
return None
| 1,011 | 1,025 |
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import re
import sys
from template import util
from template.constants import *
from template.directive import Directive
from template.grammar import Grammar
from template.util import TemplateException
"""
template.parser - LALR(1) parser for compiling template documents
SYNOPSIS
import template.parser
parser = template.parser.Parser(config)
template = parser.parse(text)
DESCRIPTION
The template.parser module implements a LALR(1) parser and associated
methods for parsing template documents into Python code.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.parser.Parser object. A
dictionary may be supplied as a parameter to provide configuration
values. These may include:
* START_TAG, END_TAG
The START_TAG and END_TAG options are used to specify character
sequences or regular expressions that mark the start and end of a
template directive. The default values for START_TAG and END_TAG are
'[%' and '%]' respectively, giving us the familiar directive style:
[% example %]
Any Python regex characters can be used and therefore should be
escaped (or use the re.escape function) if they are intended to
represent literal characters.
parser = template.parser.Parser({
'START_TAG': re.escape('<+'),
'END_TAG': re.escape('+>'),
})
example:
<+ INCLUDE foobar +>
The TAGS directive can also be used to set the START_TAG and END_TAG values
on a per-template file basis.
[% TAGS <+ +> %]
* TAG_STYLE
The TAG_STYLE option can be used to set both START_TAG and END_TAG
according to pre-defined tag styles.
parser = template.parser.Parser({
'TAG_STYLE': 'star',
})
Available styles are:
template [% ... %] (default)
template1 [% ... %] or %% ... %% (TT version 1)
metatext %% ... %% (Text::MetaText)
star [* ... *] (TT alternate)
php <? ... ?> (PHP)
asp <% ... %> (ASP)
mason <% ... > (HTML::Mason)
html <!-- ... --> (HTML comments)
Any values specified for START_TAG and/or END_TAG will over-ride those
defined by a TAG_STYLE.
The TAGS directive may also be used to set a TAG_STYLE
[% TAGS html %]
<!-- INCLUDE header -->
* PRE_CHOMP, POST_CHOMP
Anything outside a directive tag is considered plain text and is
generally passed through unaltered (but see the INTERPOLATE option).
This includes all whitespace and newline characters surrounding
directive tags. Directives that don't generate any output will leave
gaps in the output document.
Example:
Foo
[% a = 10 %]
Bar
Output:
Foo
Bar
The PRE_CHOMP and POST_CHOMP options can help to clean up some of this
extraneous whitespace. Both are disabled by default.
parser = template.parser.Parser({
'PRE_CHOMP': 1,
'POST_CHOMP': 1,
})
With PRE_CHOMP set to 1, the newline and whitespace preceding a
directive at the start of a line will be deleted. This has the effect
of concatenating a line that starts with a directive onto the end of
the previous line.
    Foo <----------.
                   |
,---(PRE_CHOMP)----'
|
`-- [% a = 10 %] --.
                   |
,---(POST_CHOMP)---'
|
`-> Bar
With POST_CHOMP set to 1, any whitespace after a directive up to and
including the newline will be deleted. This has the effect of joining
a line that ends with a directive onto the start of the next line.
If PRE_CHOMP or POST_CHOMP is set to 2, all whitespace including any
number of newlines will be removed and replaced with a single space.
This is useful for HTML, where (usually) a contiguous block of
whitespace is rendered the same as a single space.
With PRE_CHOMP or POST_CHOMP set to 3, all adjacent whitespace
(including newlines) will be removed entirely.
These values are defined as CHOMP_NONE, CHOMP_ONE, CHOMP_COLLAPSE and
CHOMP_GREEDY constants in the template.constants module. CHOMP_ALL
is also defined as an alias for CHOMP_ONE to provide backwards
compatibility with earlier versions of the Template Toolkit.
Additionally the chomp tag modifiers listed below may also be used for
the PRE_CHOMP and POST_CHOMP configuration.
tt = template.Template({
'PRE_CHOMP': '~',
'POST_CHOMP': '-',
})
PRE_CHOMP and POST_CHOMP can be activated for individual directives by
placing a '-' immediately at the start and/or end of the directive.
[% FOREACH user IN userlist %]
[%- user -%]
[% END %]
This has the same effect as CHOMP_ONE in removing all whitespace
before or after the directive up to and including the newline. The
template will be processed as if written:
[% FOREACH user IN userlist %][% user %][% END %]
To remove all whitespace including any number of newlines, use the '~'
character instead.
[% FOREACH user IN userlist %]
[%~ user ~%]
[% END %]
To collapse all whitespace to a single space, use the '=' character.
[% FOREACH user IN userlist %]
[%= user =%]
[% END %]
Here the template is processed as if written:
[% FOREACH user IN userlist %] [% user %] [% END %]
If you have PRE_CHOMP or POST_CHOMP set as configuration options then
you can use '+' to disable any chomping options (i.e. leave the
whitespace intact) on a per-directive basis.
[% FOREACH user = userlist %]
User: [% user +%]
[% END %]
With POST_CHOMP set to CHOMP_ONE, the above example would be parsed as
if written:
[% FOREACH user = userlist %]User: [% user %]
[% END %]
For reference, the PRE_CHOMP and POST_CHOMP configuration options may be set to any of the following:
Constant Value Tag Modifier
----------------------------------
CHOMP_NONE 0 +
CHOMP_ONE 1 -
CHOMP_COLLAPSE 2 =
CHOMP_GREEDY 3 ~
* INTERPOLATE
The INTERPOLATE flag, when set to any true value will cause variable
references in plain text (i.e. not surrounded by START_TAG and
END_TAG) to be recognised and interpolated accordingly.
parser = template.parser.Parser({
'INTERPOLATE': 1,
})
Variables should be prefixed by a '$' to identify them. Curly braces
can be used in the familiar Perl/shell style to explicitly scope the
variable name where required.
# INTERPOLATE => 0
<a href="http://[% server %]/[% help %]">
<img src="[% images %]/help.gif"></a>
[% myorg.name %]
# INTERPOLATE => 1
<a href="http://$server/$help">
<img src="$images/help.gif"></a>
$myorg.name
# explicit scoping with { }
<img src="$images/${icon.next}.gif">
Note that a limitation in Perl's regex engine restricts the maximum
length of an interpolated template to around 32 kilobytes or possibly
less. Files that exceed this limit in size will typically cause Perl
to dump core with a segmentation fault. If you routinely process
templates of this size then you should disable INTERPOLATE or split
the templates into several smaller files or blocks which can then be
joined back together via PROCESS or INCLUDE.
It is unknown whether this limitation is shared by the Python regex
engine.
* ANYCASE
By default, directive keywords should be expressed in UPPER CASE. The
ANYCASE option can be set to allow directive keywords to be specified
in any case.
# ANYCASE => 0 (default)
[% INCLUDE foobar %] # OK
[% include foobar %] # ERROR
[% include = 10 %] # OK, 'include' is a variable
# ANYCASE => 1
[% INCLUDE foobar %] # OK
[% include foobar %] # OK
[% include = 10 %] # ERROR, 'include' is reserved word
One side-effect of enabling ANYCASE is that you cannot use a variable
of the same name as a reserved word, regardless of case. The reserved
words are currently:
GET CALL SET DEFAULT INSERT INCLUDE PROCESS WRAPPER
IF UNLESS ELSE ELSIF FOR FOREACH WHILE SWITCH CASE
USE PLUGIN FILTER MACRO PYTHON RAWPYTHON BLOCK META
TRY THROW CATCH FINAL NEXT LAST BREAK RETURN STOP
CLEAR TO STEP AND OR NOT MOD DIV END
The only lower case reserved words that cannot be used for variables,
regardless of the ANYCASE option, are the operators:
and or not mod div
* V1DOLLAR
In version 1 of the Template Toolkit, an optional leading '$' could be placed
on any template variable and would be silently ignored.
# VERSION 1
[% $foo %] === [% foo %]
[% $hash.$key %] === [% hash.key %]
To interpolate a variable value the '${' ... '}' construct was used.
Typically, one would do this to index into a hash array when the key
value was stored in a variable.
example:
vars = {
    'users': {
        'aba': { 'name': 'Alan Aardvark', ... },
        'abw': { 'name': 'Andy Wardley', ... },
        ...
    },
    'uid': 'aba',
    ...
}
template.process('user/home.html', vars)
'user/home.html':
[% user = users.${uid} %] # users.aba
Name: [% user.name %] # Alan Aardvark
This was inconsistent with double quoted strings and also the
INTERPOLATE mode, where a leading '$' in text was enough to indicate a
variable for interpolation, and the additional curly braces were used
to delimit variable names where necessary. Note that this use is
consistent with UNIX and Perl conventions, among others.
# double quoted string interpolation
[% name = "$title ${user.name}" %]
# INTERPOLATE = 1
<img src="$images/help.gif"></a>
<img src="$images/${icon.next}.gif">
For version 2, these inconsistencies have been removed and the syntax
clarified. A leading '$' on a variable is now used exclusively to
indicate that the variable name should be interpolated
(e.g. substituted for its value) before being used. The earlier example
from version 1:
# VERSION 1
[% user = users.${uid} %]
Name: [% user.name %]
can now be simplified in version 2 as:
# VERSION 2
[% user = users.$uid %]
Name: [% user.name %]
The leading dollar is no longer ignored and has the same effect of
interpolation as '${' ... '}' in version 1. The curly braces may
still be used to explicitly scope the interpolated variable name
where necessary.
e.g.
[% user = users.${me.id} %]
Name: [% user.name %]
The rule applies for all variables, both within directives and in
plain text if processed with the INTERPOLATE option. This means that
you should no longer (if you ever did) add a leading '$' to a variable
inside a directive, unless you explicitly want it to be interpolated.
One obvious side-effect is that any version 1 templates with variables
using a leading '$' will no longer be processed as expected. Given
the following variable definitions,
[% foo = 'bar'
bar = 'baz'
%]
version 1 would interpret the following as:
# VERSION 1
[% $foo %] => [% GET foo %] => bar
whereas version 2 interprets it as:
# VERSION 2
[% $foo %] => [% GET $foo %] => [% GET bar %] => baz
In version 1, the '$' is ignored and the value for the variable 'foo'
is retrieved and printed. In version 2, the variable '$foo' is first
interpolated to give the variable name 'bar' whose value is then
retrieved and printed.
The use of the optional '$' has never been strongly recommended, but
to assist in backwards compatibility with any version 1 templates that
may rely on this "feature", the V1DOLLAR option can be set to 1
(default: 0) to revert the behaviour and have leading '$' characters
ignored.
parser = template.parser.Parser({
    'V1DOLLAR': 1,
})
* GRAMMAR
The GRAMMAR configuration item can be used to specify an alternate
grammar for the parser. This allows a modified or entirely new
template language to be constructed and used by the Template Toolkit.
Source templates are compiled to Python code by the template.parser
module using the template.grammar module (by default) to define the
language structure and semantics. Compiled templates are thus
inherently "compatible" with each other and there is nothing to prevent
any number of different template languages being compiled and used within
the same Template Toolkit processing environment (other than the usual
time and memory constraints).
The template.grammar file is constructed from a YACC like grammar
(using Parse::YAPP) and a skeleton module template. These files are
provided, along with a small script to rebuild the grammar, in the
'parser' sub-directory of the distribution. You don't have to know or
worry about these unless you want to hack on the template language or
define your own variant. There is a README file in the same directory
which provides some small guidance but it is assumed that you know
what you're doing if you venture herein. If you grok LALR parsers,
then you should find it comfortably familiar.
By default, an instance of the default template.grammar.Grammar will
be created and used automatically if a GRAMMAR item isn't specified.
import myorg.template.grammar
parser = template.parser.Parser({
'GRAMMAR': myorg.template.grammar.Grammar(),
})
* DEBUG
The DEBUG option can be used to enable various debugging features of
the Template::Parser module.
from template.constants import *
tt = template.Template({
'DEBUG': DEBUG_PARSER | DEBUG_DIRS,
})
The DEBUG value can include any of the following. Multiple values
should be combined using the logical OR operator, '|'.
** DEBUG_PARSER
This flag causes the Parser to generate debugging messages that show
the Python code generated by parsing and compiling each template.
** DEBUG_DIRS
This option causes the Template Toolkit to generate comments
indicating the source file, line and original text of each directive
in the template. These comments are embedded in the template output
using the format defined in the DEBUG_FORMAT configuration item, or a
simple default format if unspecified.
For example, the following template fragment:
Hello World
would generate this output:
## input text line 1 : ##
Hello
## input text line 2 : World ##
World
parse(text)
The parse() method parses the text passed in the first parameter and
returns a dictionary of data defining the compiled representation of
the template text, suitable for passing to the
template.document.Document constructor.
Example:
data = parser.parse(text)
The data dictionary returned contains a BLOCK item containing the
compiled Python code for the template, a DEFBLOCKS item containing a
dictionary of sub-template BLOCKs defined within in the template, and
a METADATA item containing a dictionary of metadata values defined in
META tags.
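A slightly fuller sketch of the calling side (illustrative only; the variable
names are assumptions, the key names are as described above):
    parser = template.parser.Parser({})
    data = parser.parse(text)
    if data:
        block = data["BLOCK"]          # compiled Python code for the template
        defblocks = data["DEFBLOCKS"]  # nested [% BLOCK name %] definitions
        metadata = data["METADATA"]    # values set via [% META ... %] tags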
"""
CONTINUE = 0
ACCEPT = 1
ERROR = 2
ABORT = 3
TAG_STYLE = {
"default": (r"\[%", r"%\]"),
"template1": (r"[[%]%", r"%[]%]"),
"metatext": (r"%%", r"%%"),
"html": (r"<!--", r"-->"),
"mason": (r"<%", r">"),
"asp": (r"<%", r"%>"),
"php": (r"<\?", r"\?>"),
"star": (r"\[\*", r"\*\]"),
}
TAG_STYLE["template"] = TAG_STYLE["tt2"] = TAG_STYLE["default"]
DEFAULT_STYLE = {
"START_TAG": TAG_STYLE["default"][0],
"END_TAG": TAG_STYLE["default"][1],
"ANYCASE": 0,
"INTERPOLATE": 0,
"PRE_CHOMP": 0,
"POST_CHOMP": 0,
"V1DOLLAR": 0,
"EVAL_PYTHON": 0,
}
ESCAPE = {"n": "\n", "r": "\r", "t": "\t"}
CHOMP_FLAGS = r"[-=~+]"
CHOMP_ALL = str(CHOMP_ALL)
CHOMP_COLLAPSE = str(CHOMP_COLLAPSE)
CHOMP_GREEDY = str(CHOMP_GREEDY)
CHOMP_NONE = str(CHOMP_NONE)
CHOMP_CONST = {
"-": CHOMP_ALL,
"=": CHOMP_COLLAPSE,
"~": CHOMP_GREEDY,
"+": CHOMP_NONE
}
PRE_CHOMP = {
CHOMP_ALL: lambda x: re.sub(r"(\n|^)[^\S\n]*\Z", "", x),
CHOMP_COLLAPSE: lambda x: re.sub(r"\s+\Z", " ", x),
CHOMP_GREEDY: lambda x: re.sub(r"\s+\Z", "", x),
CHOMP_NONE: lambda x: x,
}
def postchomp(regex, prefix):
regex = re.compile(regex)
def strip(text, postlines):
match = regex.match(text)
if match:
text = prefix + text[match.end():]
postlines += match.group().count("\n")
return text, postlines
return strip
POST_CHOMP = {
CHOMP_ALL: postchomp(r"[^\S\n]*\n", ""),
CHOMP_COLLAPSE: postchomp(r"\s+", " "),
CHOMP_GREEDY: postchomp(r"\s+", ""),
CHOMP_NONE: lambda x, y: (x, y),
}
def Chomp(x):
return re.sub(r"[-=~+]", lambda m: CHOMP_CONST[m.group()], str(x))
GRAMMAR = re.compile(r"""
# strip out any comments
(\#[^\n]*)
|
# a quoted string matches in $3
(["']) # $2 - opening quote, ' or "
( # $3 - quoted text buffer
(?: # repeat group (no backreference)
\\\\ # an escaped backslash
| # ...or...
\\\2 # an escaped quote \" or \' (match $1)
| # ...or...
. # any other character
| \n
)*? # non-greedy repeat
) # end of $3
\2 # match opening quote
|
# an unquoted number matches in $4
(-? \d+ (?: \. \d+ )?) # numbers
|
# filename matches in $5
( /? \w+ (?: (?: /|::? ) \w* )+ | /\w+ )
|
# an identifier matches in $6
(\w+)
|
# an unquoted word or symbol matches in $7
( [(){}\[\]:;,/\\] # misc parentheses and symbols
| -> # arrow operator (for future?)
| [-+*] # math operations
| \${? # dollar with optional left brace
| => # like "="
| [=!<>]?= | [!<>] # equality tests
| &&? | \|\|? # boolean ops
| \.\.? # n..n sequence
| \S+ # something unquoted
) # end of $7
""", re.VERBOSE)
QUOTED_STRING = re.compile(r"""
( (?: \\. | [^\$] ){1,3000} ) # escaped or non-'$' character [$1]
|
( \$ (?: # embedded variable [$2]
(?: \{ ([^\}]*) \} ) # ${ ... } [$3]
|
([\w\.]+) # $word [$4]
)
)
""", re.VERBOSE)
class Error(Exception):
"""A trivial local exception class."""
pass
class Parser:
"""This module implements a LALR(1) parser and assocated support
methods to parse template documents into the appropriate "compiled"
format.
"""
def __init__(self, param):
self.start_tag = param.get("START_TAG") or DEFAULT_STYLE["START_TAG"]
self.end_tag = param.get("END_TAG") or DEFAULT_STYLE["END_TAG"]
self.tag_style = param.get("TAG_STYLE", "default")
self.anycase = param.get("ANYCASE", False)
self.interpolate = param.get("INTERPOLATE", False)
self.pre_chomp = param.get("PRE_CHOMP", CHOMP_NONE)
self.post_chomp = param.get("POST_CHOMP", CHOMP_NONE)
self.v1dollar = param.get("V1DOLLAR", False)
self.eval_python = param.get("EVAL_PYTHON", False)
self.file_info = param.get("FILE_INFO", 1)
self.grammar = param.get("GRAMMAR", Grammar())
self.factory = param.get("FACTORY", Directive)
self.fileinfo = []
self.defblocks = []
self.defblock_stack = []
self.infor = 0
self.inwhile = 0
self.style = []
# Build a FACTORY object to include any NAMESPACE definitions,
# but only if FACTORY isn't already a (non-callable) object.
if isinstance(self.factory, collections.Callable):
self.factory = self.factory(param)
self.lextable = self.grammar.lextable
self.states = self.grammar.states
self.rules = self.grammar.rules
self.new_style(param)
self.tokenize = (
((1,), self._comment),
((2, 3), self._string),
((4,), self._number),
((5,), self._filename),
((6,), self._identifier),
((7,), self._word),
)
def new_style(self, config):
"""Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
"""
if self.style:
style = self.style[-1]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get("TAG_STYLE")
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if tags is None:
raise Error("Invalid tag style: %s" % tagstyle)
start, end = tags
config["START_TAG"] = config.get("START_TAG", start)
config["END_TAG"] = config.get("END_TAG", end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if value is not None:
style[key] = value
self.style.append(style)
return style
def old_style(self):
"""Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
"""
if len(self.style) <= 1:
raise Error("only 1 parser style remaining")
self.style.pop()
return self.style[-1]
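    # Illustrative pairing of the two methods above (tag style assumed):
    #     parser.new_style({'TAG_STYLE': 'star'})   # directives now use [* ... *]
    #     ...                                       # split/parse some text
    #     parser.old_style()                        # revert to the previous style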
def location(self):
"""Return Python comment indicating current parser file and line."""
if not self.file_info:
return "\n"
line = self.line
info = self.fileinfo[-1]
file = info and (info.path or info.name) or "(unknown template)"
line = re.sub(r"-.*", "", str(line)) # might be 'n-n'
return '#line %s "%s"\n' % (line, file)
def parse(self, text, info=None):
"""Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
"""
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if tokens is None:
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {"BLOCK": block,
"DEFBLOCKS": self.defblock,
"METADATA": self.metadata}
else:
return None
def split_text(self, text):
"""Split input template text into directives and raw text chunks."""
tokens = []
line = 1
style = self.style[-1]
def make_splitter(delims):
return re.compile(r"(?s)(.*?)%s(.*?)%s" % delims)
splitter = make_splitter((style["START_TAG"], style["END_TAG"]))
while True:
match = splitter.match(text)
if not match:
break
text = text[match.end():]
pre, dir = match.group(1), match.group(2)
prelines = pre.count("\n")
dirlines = dir.count("\n")
postlines = 0
if dir.startswith("#"):
                # comment out entire directive except for any end chomp flag
match = re.search(CHOMP_FLAGS + "$", dir)
if match:
dir = match.group()
else:
dir = ""
else:
# PRE_CHOMP: process whitespace before tag
match = re.match(r"(%s)?\s*" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["PRE_CHOMP"])
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
# POST_CHOMP: process whitespace after tag
match = re.search(r"\s*(%s)?\s*$" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["POST_CHOMP"])
if match:
dir = dir[:match.start()]
text, postlines = POST_CHOMP[chomp](text, postlines)
if pre:
if style["INTERPOLATE"]:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(["TEXT", pre])
line += prelines
if dir:
# The TAGS directive is a compile-time switch.
match = re.match(r"(?i)TAGS\s+(.*)", dir)
if match:
tags = re.split(r"\s+", match.group(1))
if len(tags) > 1:
splitter = make_splitter(tuple(re.escape(x) for x in tags[:2]))
elif tags[0] in TAG_STYLE:
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write("Invalid TAGS style: %s" % tags[0])
else:
if dirlines > 0:
line_range = "%d-%d" % (line, line + dirlines)
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += dirlines + postlines
if text:
if style["INTERPOLATE"]:
tokens.append([text, line, "ITEXT"])
else:
tokens.extend(["TEXT", text])
return tokens
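    # Illustrative output shape (input assumed): for "Hi [% GET name %]!" with
    # the default tags and INTERPOLATE off, the returned list looks roughly like
    #     ['TEXT', 'Hi ', ['GET name', '1', [...directive tokens...]], 'TEXT', '!']
    # i.e. raw text as flat 'TEXT'/value pairs and each directive as a
    # [text, line, tokens] entry.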
def _comment(self, token):
"""Tokenizes a comment."""
return ()
def _string(self, quote, token):
"""Tokenizes a string."""
if quote == '"':
if re.search(r"[$\\]", token):
# unescape " and \ but leave \$ escaped so that
# interpolate_text() doesn't incorrectly treat it
# as a variable reference
token = re.sub(r'\\([\\"])', r'\1', token)
token = re.sub(r'\\([^$nrt])', r'\1', token)
token = re.sub(r'\\([nrt])', lambda m: ESCAPE[m.group(1)], token)
return ['"', '"'] + self.interpolate_text(token) + ['"', '"']
else:
return "LITERAL", "scalar(%r)" % token
else:
            # Unescape \' and \\ but leave any other \x sequence untouched
            # (the boolean indexes group 1 for quote/backslash, group 0 otherwise):
token = re.sub(r"\\(.)", lambda m: m.group(m.group(1) in "'\\"), token)
return "LITERAL", "scalar(%r)" % token
def _number(self, token):
"""Tokenizes a number."""
return "NUMBER", "scalar(%s)" % token
def _filename(self, token):
"""Tokenizes a filename."""
return "FILENAME", token
def _identifier(self, token):
"""Tokenizes an identifier."""
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if toktype is not None:
return toktype, uctoken
else:
return "IDENT", token
def _word(self, token):
"""Tokenizes an unquoted word or symbol ."""
return self.lextable.get(token, "UNQUOTED"), token
def tokenise_directive(self, dirtext):
"""Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
"""
tokens = []
for match in GRAMMAR.finditer(dirtext):
for indices, method in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens
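    # Illustrative (token names depend on the grammar in use):
    #     tokenise_directive("GET name")
    # returns a flat list of alternating type/value entries, e.g. something
    # like ['GET', 'GET', 'IDENT', 'name'] with the default grammar.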
def _parse(self, tokens, info):
"""Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details.
"""
self.grammar.install_factory(self.factory)
stack = [[0, None]] # DFA stack
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = info and info.name
self.inpython = 0
value = None
while True:
stateno = stack[-1][0]
state = self.states[stateno]
# see if any lookaheads exist for the current state
if "ACTIONS" in state:
# get next token and expand any directives (ie. token is a
# list) onto the front of the token list
while token is None and tokens:
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
text, self.line, token = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = token + [";", ";"]
token = None # force redo
elif token == "ITEXT":
if in_python:
# don't perform interpolation in PYTHON blocks
token = "TEXT"
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None # force redo
else:
# toggle string flag to indicate if we're crossing
# a string boundary
if token == '"':
in_string = not in_string
value = tokens and tokens.pop(0) or None
if token is None:
token = ""
# get the next state for the current lookahead token
lookup = state["ACTIONS"].get(token)
if lookup:
action = lookup
else:
action = state.get("DEFAULT")
else:
# no lookahead assertions
action = state.get("DEFAULT")
# ERROR: no ACTION
if action is None:
break
# shift (positive ACTION)
if action > 0:
stack.append([action, value])
token = value = None
else:
# reduce (negative ACTION)
lhs, len_, code = self.rules[-action]
# no action implies ACCEPTance
if not action:
status = ACCEPT
# use dummy sub if code ref doesn't exist
if not code:
code = lambda *arg: len(arg) >= 2 and arg[1] or None
if len_ > 0:
codevars = [x[1] for x in stack[-len_:]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
# reduce stack by len_
if len_ > 0:
stack[-len_:] = []
# ACCEPT
if status == ACCEPT:
return coderet
elif status == ABORT:
return None
elif status == ERROR:
break
stack.append([self.states[stack[-1][0]].get("GOTOS", {}).get(lhs),
coderet])
# ERROR
if value is None:
self._parse_error("unexpected end of input", info.name)
elif value == ";":
self._parse_error("unexpected end of directive", info.name, text)
else:
self._parse_error("unexpected token (%s)" %
util.unscalar_lex(value), info.name, text)
def _parse_error(self, msg, name, text=None):
"""Method used to handle errors encountered during the parse process
in the _parse() method.
"""
line = self.line or "unknown"
if text is not None:
msg += "\n [%% %s %%]" % text
raise TemplateException("parse", "%s line %s: %s" % (name, line, msg))
def define_block(self, name, block):
"""Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
"""
if self.defblock is None:
return None
self.defblock[name] = block
return None
def push_defblock(self):
self.defblock_stack.append(self.defblock)
self.defblock = {}
def pop_defblock(self):
if not self.defblock_stack:
return self.defblock
block = self.defblock
self.defblock = self.defblock_stack.pop(0)
return block
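    # Descriptive note: push_defblock() stashes the current BLOCK dictionary and
    # starts an empty one; pop_defblock() hands back the blocks collected since
    # then and restores a stashed dictionary. The callers are presumably the
    # grammar's action routines (assumption); they are not shown here.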
def add_metadata(self, setlist):
setlist = [util.unscalar_lex(x) for x in setlist]
if self.metadata is not None:
for key, value in util.chop(setlist, 2):
self.metadata[key] = value
return None
def interpolate_text(self, text, line=0):
"""Examines text looking for any variable references embedded
like $this or like ${ this }.
"""
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = match.group(3) or match.group(4)
dir = match.group(2)
# preceding text
if pre:
line += pre.count("\n")
tokens.extend(("TEXT", pre.replace("\\$", "$")))
# variable reference
if var:
line += dir.count("\n")
tokens.append([dir, line, self.tokenise_directive(var)])
# other '$' reference - treated as text
elif dir:
line += dir.count("\n")
tokens.extend(("TEXT", dir))
return tokens
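    # Illustrative (input assumed): interpolate_text('Hello $name!') yields
    # something like
    #     ['TEXT', 'Hello ', ['$name', 0, [...tokens for name...]], 'TEXT', '!']
    # i.e. literal text as 'TEXT'/value pairs and each $var or ${ var }
    # reference as a [dir, line, tokens] entry.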
|
mocked_today
|
Helper to easily make a Python "with statement" mocking the "today" date.
:param forced_today: The expected "today" date as a str or Date object.
:return: An object to be used like 'with self.mocked_today(<today>):'.
|
# -*- coding: utf-8 -*-
from odoo import fields
from odoo.tests.common import Form, SavepointCase
from odoo.tests import tagged
from contextlib import contextmanager
from unittest.mock import patch
import datetime
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
@classmethod
def copy_account(cls, account):
suffix_nb = 1
while True:
new_code = '%s (%s)' % (account.code, suffix_nb)
if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
suffix_nb += 1
else:
return account.copy(default={'code': new_code})
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass()
if chart_template_ref:
chart_template = cls.env.ref(chart_template_ref)
else:
chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)
if not chart_template:
cls.tearDownClass()
# skipTest raises exception
cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")
# Create user.
user = cls.env['res.users'].create({
'name': 'Because I am accountman!',
'login': 'accountman',
'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
})
user.partner_id.email = '[email protected]'
# Shadow the current environment/cursor with one having the report user.
# This is mandatory to test access rights.
cls.env = cls.env(user=user)
cls.cr = cls.env.cr
cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template)
cls.company_data = cls.setup_company_data('company_1_data', chart_template)
user.write({
'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
'company_id': cls.company_data['company'].id,
})
cls.currency_data = cls.setup_multi_currency_data()
# ==== Taxes ====
cls.tax_sale_a = cls.company_data['default_tax_sale']
cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
cls.tax_purchase_a = cls.company_data['default_tax_purchase']
cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
# ==== Products ====
cls.product_a = cls.env['product.product'].create({
'name': 'product_a',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'lst_price': 1000.0,
'standard_price': 800.0,
'property_account_income_id': cls.company_data['default_account_revenue'].id,
'property_account_expense_id': cls.company_data['default_account_expense'].id,
'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
})
cls.product_b = cls.env['product.product'].create({
'name': 'product_b',
'uom_id': cls.env.ref('uom.product_uom_dozen').id,
'lst_price': 200.0,
'standard_price': 160.0,
'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
})
# ==== Fiscal positions ====
cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
'name': 'fiscal_pos_a',
'tax_ids': [
(0, None, {
'tax_src_id': cls.tax_sale_a.id,
'tax_dest_id': cls.tax_sale_b.id,
}),
(0, None, {
'tax_src_id': cls.tax_purchase_a.id,
'tax_dest_id': cls.tax_purchase_b.id,
}),
],
'account_ids': [
(0, None, {
'account_src_id': cls.product_a.property_account_income_id.id,
'account_dest_id': cls.product_b.property_account_income_id.id,
}),
(0, None, {
'account_src_id': cls.product_a.property_account_expense_id.id,
'account_dest_id': cls.product_b.property_account_expense_id.id,
}),
],
})
# ==== Payment terms ====
cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
cls.pay_terms_b = cls.env['account.payment.term'].create({
'name': '30% Advance End of Following Month',
'note': 'Payment terms: 30% Advance End of Following Month',
'line_ids': [
(0, 0, {
'value': 'percent',
'value_amount': 30.0,
'sequence': 400,
'days': 0,
'option': 'day_after_invoice_date',
}),
(0, 0, {
'value': 'balance',
'value_amount': 0.0,
'sequence': 500,
'days': 31,
'option': 'day_following_month',
}),
],
})
# ==== Partners ====
cls.partner_a = cls.env['res.partner'].create({
'name': 'partner_a',
'property_payment_term_id': cls.pay_terms_a.id,
'property_supplier_payment_term_id': cls.pay_terms_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
'property_account_payable_id': cls.company_data['default_account_payable'].id,
'company_id': False,
})
cls.partner_b = cls.env['res.partner'].create({
'name': 'partner_b',
'property_payment_term_id': cls.pay_terms_b.id,
'property_supplier_payment_term_id': cls.pay_terms_b.id,
'property_account_position_id': cls.fiscal_pos_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
'company_id': False,
})
# ==== Cash rounding ====
cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
'name': 'add_invoice_line',
'rounding': 0.05,
'strategy': 'add_invoice_line',
'account_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'rounding_method': 'UP',
})
cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
'name': 'biggest_tax',
'rounding': 0.05,
'strategy': 'biggest_tax',
'rounding_method': 'DOWN',
})
@classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
        ''' Create a new company having the name passed as a parameter.
        A chart of accounts will be installed for this company: the same as the current company's.
        The current user will get access to this company.
        :param company_name: The name of the company.
        :return: A dictionary containing all relevant accounting data for testing.
'''
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = [('company_id', '=', company.id)] + domain
account = None
if template_code:
account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)
if not account:
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({
'name': company_name,
'currency_id': currency.id,
**kwargs,
})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
# The currency could be different after the installation of the chart template.
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {
'company': company,
'currency': company.currency_id,
'default_account_revenue': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
], limit=1),
'default_account_expense': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
], limit=1),
'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
('user_type_id.type', '=', 'receivable')
]),
'default_account_payable': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id.type', '=', 'payable')
], limit=1),
'default_account_assets': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
], limit=1),
'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_journal_misc': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'general')
], limit=1),
'default_journal_sale': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'sale')
], limit=1),
'default_journal_purchase': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'purchase')
], limit=1),
'default_journal_bank': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'bank')
], limit=1),
'default_journal_cash': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'cash')
], limit=1),
'default_tax_sale': company.account_sale_tax_id,
'default_tax_purchase': company.account_purchase_tax_id,
}
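    # Illustrative (see setUpClass above): tests typically read entries from the
    # returned mapping, e.g. cls.company_data['default_journal_sale'] or
    # cls.company_data['default_account_revenue'].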
@classmethod
def setup_multi_currency_data(cls, default_values={}, rate2016=3.0, rate2017=2.0):
foreign_currency = cls.env['res.currency'].create({
'name': 'Gold Coin',
'symbol': '☺',
'rounding': 0.001,
'position': 'after',
'currency_unit_label': 'Gold',
'currency_subunit_label': 'Silver',
**default_values,
})
rate1 = cls.env['res.currency.rate'].create({
'name': '2016-01-01',
'rate': rate2016,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
rate2 = cls.env['res.currency.rate'].create({
'name': '2017-01-01',
'rate': rate2017,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
return {
'currency': foreign_currency,
'rates': rate1 + rate2,
}
@classmethod
def setup_armageddon_tax(cls, tax_name, company_data):
return cls.env['account.tax'].create({
'name': '%s (group)' % tax_name,
'amount_type': 'group',
'amount': 0.0,
'children_tax_ids': [
(0, 0, {
'name': '%s (child 1)' % tax_name,
'amount_type': 'percent',
'amount': 20.0,
'price_include': True,
'include_base_amount': True,
'tax_exigibility': 'on_invoice',
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
}),
(0, 0, {
'name': '%s (child 2)' % tax_name,
'amount_type': 'percent',
'amount': 10.0,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
}),
],
})
@classmethod
def init_invoice(cls, move_type, partner=None, invoice_date=None):
move_form = Form(cls.env['account.move'].with_context(default_type=move_type))
move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
move_form.partner_id = partner or cls.partner_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_b
return move_form.save()
def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
def sort_lines(lines):
return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
self.assertRecordValues(move, [expected_move_values])
# MASKED: mocked_today function (lines 380-403)
class AccountingSavepointCase(AccountTestInvoicingCommon):
# Ensure the backward-compatibility before saas-13.2.
pass
|
@contextmanager
def mocked_today(self, forced_today):
        ''' Helper to easily make a Python "with statement" mocking the "today" date.
:param forced_today: The expected "today" date as a str or Date object.
:return: An object to be used like 'with self.mocked_today(<today>):'.
'''
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
yield
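    # Illustrative usage inside a test method (date value assumed):
    #     with self.mocked_today('2017-06-15'):
    #         self.assertEqual(fields.Date.today(), fields.Date.from_string('2017-06-15'))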
| 380 | 403 |
# -*- coding: utf-8 -*-
from odoo import fields
from odoo.tests.common import Form, SavepointCase
from odoo.tests import tagged
from contextlib import contextmanager
from unittest.mock import patch
import datetime
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
@classmethod
def copy_account(cls, account):
suffix_nb = 1
while True:
new_code = '%s (%s)' % (account.code, suffix_nb)
if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
suffix_nb += 1
else:
return account.copy(default={'code': new_code})
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass()
if chart_template_ref:
chart_template = cls.env.ref(chart_template_ref)
else:
chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)
if not chart_template:
cls.tearDownClass()
# skipTest raises exception
cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")
# Create user.
user = cls.env['res.users'].create({
'name': 'Because I am accountman!',
'login': 'accountman',
'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
})
user.partner_id.email = '[email protected]'
# Shadow the current environment/cursor with one having the report user.
# This is mandatory to test access rights.
cls.env = cls.env(user=user)
cls.cr = cls.env.cr
cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template)
cls.company_data = cls.setup_company_data('company_1_data', chart_template)
user.write({
'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
'company_id': cls.company_data['company'].id,
})
cls.currency_data = cls.setup_multi_currency_data()
# ==== Taxes ====
cls.tax_sale_a = cls.company_data['default_tax_sale']
cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
cls.tax_purchase_a = cls.company_data['default_tax_purchase']
cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
# ==== Products ====
cls.product_a = cls.env['product.product'].create({
'name': 'product_a',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'lst_price': 1000.0,
'standard_price': 800.0,
'property_account_income_id': cls.company_data['default_account_revenue'].id,
'property_account_expense_id': cls.company_data['default_account_expense'].id,
'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
})
cls.product_b = cls.env['product.product'].create({
'name': 'product_b',
'uom_id': cls.env.ref('uom.product_uom_dozen').id,
'lst_price': 200.0,
'standard_price': 160.0,
'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
})
# ==== Fiscal positions ====
cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
'name': 'fiscal_pos_a',
'tax_ids': [
(0, None, {
'tax_src_id': cls.tax_sale_a.id,
'tax_dest_id': cls.tax_sale_b.id,
}),
(0, None, {
'tax_src_id': cls.tax_purchase_a.id,
'tax_dest_id': cls.tax_purchase_b.id,
}),
],
'account_ids': [
(0, None, {
'account_src_id': cls.product_a.property_account_income_id.id,
'account_dest_id': cls.product_b.property_account_income_id.id,
}),
(0, None, {
'account_src_id': cls.product_a.property_account_expense_id.id,
'account_dest_id': cls.product_b.property_account_expense_id.id,
}),
],
})
# ==== Payment terms ====
cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
cls.pay_terms_b = cls.env['account.payment.term'].create({
'name': '30% Advance End of Following Month',
'note': 'Payment terms: 30% Advance End of Following Month',
'line_ids': [
(0, 0, {
'value': 'percent',
'value_amount': 30.0,
'sequence': 400,
'days': 0,
'option': 'day_after_invoice_date',
}),
(0, 0, {
'value': 'balance',
'value_amount': 0.0,
'sequence': 500,
'days': 31,
'option': 'day_following_month',
}),
],
})
# ==== Partners ====
cls.partner_a = cls.env['res.partner'].create({
'name': 'partner_a',
'property_payment_term_id': cls.pay_terms_a.id,
'property_supplier_payment_term_id': cls.pay_terms_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
'property_account_payable_id': cls.company_data['default_account_payable'].id,
'company_id': False,
})
cls.partner_b = cls.env['res.partner'].create({
'name': 'partner_b',
'property_payment_term_id': cls.pay_terms_b.id,
'property_supplier_payment_term_id': cls.pay_terms_b.id,
'property_account_position_id': cls.fiscal_pos_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
'company_id': False,
})
# ==== Cash rounding ====
cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
'name': 'add_invoice_line',
'rounding': 0.05,
'strategy': 'add_invoice_line',
'account_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'rounding_method': 'UP',
})
cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
'name': 'biggest_tax',
'rounding': 0.05,
'strategy': 'biggest_tax',
'rounding_method': 'DOWN',
})
@classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
        ''' Create a new company having the name passed as a parameter.
        A chart of accounts will be installed for this company: the same as the current company's.
        The current user will get access to this company.
        :param company_name: The name of the company.
        :return: A dictionary containing all relevant accounting data for testing.
'''
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = [('company_id', '=', company.id)] + domain
account = None
if template_code:
account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)
if not account:
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({
'name': company_name,
'currency_id': currency.id,
**kwargs,
})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
# The currency could be different after the installation of the chart template.
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {
'company': company,
'currency': company.currency_id,
'default_account_revenue': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
], limit=1),
'default_account_expense': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
], limit=1),
'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
('user_type_id.type', '=', 'receivable')
]),
'default_account_payable': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id.type', '=', 'payable')
], limit=1),
'default_account_assets': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
], limit=1),
'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_journal_misc': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'general')
], limit=1),
'default_journal_sale': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'sale')
], limit=1),
'default_journal_purchase': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'purchase')
], limit=1),
'default_journal_bank': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'bank')
], limit=1),
'default_journal_cash': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'cash')
], limit=1),
'default_tax_sale': company.account_sale_tax_id,
'default_tax_purchase': company.account_purchase_tax_id,
}
@classmethod
def setup_multi_currency_data(cls, default_values={}, rate2016=3.0, rate2017=2.0):
foreign_currency = cls.env['res.currency'].create({
'name': 'Gold Coin',
'symbol': '☺',
'rounding': 0.001,
'position': 'after',
'currency_unit_label': 'Gold',
'currency_subunit_label': 'Silver',
**default_values,
})
rate1 = cls.env['res.currency.rate'].create({
'name': '2016-01-01',
'rate': rate2016,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
rate2 = cls.env['res.currency.rate'].create({
'name': '2017-01-01',
'rate': rate2017,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
return {
'currency': foreign_currency,
'rates': rate1 + rate2,
}
@classmethod
def setup_armageddon_tax(cls, tax_name, company_data):
return cls.env['account.tax'].create({
'name': '%s (group)' % tax_name,
'amount_type': 'group',
'amount': 0.0,
'children_tax_ids': [
(0, 0, {
'name': '%s (child 1)' % tax_name,
'amount_type': 'percent',
'amount': 20.0,
'price_include': True,
'include_base_amount': True,
'tax_exigibility': 'on_invoice',
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
}),
(0, 0, {
'name': '%s (child 2)' % tax_name,
'amount_type': 'percent',
'amount': 10.0,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
}),
],
})
@classmethod
def init_invoice(cls, move_type, partner=None, invoice_date=None):
move_form = Form(cls.env['account.move'].with_context(default_type=move_type))
move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
move_form.partner_id = partner or cls.partner_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_b
return move_form.save()
def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
def sort_lines(lines):
return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
self.assertRecordValues(move, [expected_move_values])
@contextmanager
def mocked_today(self, forced_today):
        ''' Helper to easily make a Python "with statement" mocking the "today" date.
:param forced_today: The expected "today" date as a str or Date object.
:return: An object to be used like 'with self.mocked_today(<today>):'.
'''
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
yield
class AccountingSavepointCase(AccountTestInvoicingCommon):
# Ensure the backward-compatibility before saas-13.2.
pass
|
_prometheus_module_metric_decorator
|
A Prometheus decorator adding timing metrics to a function.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
|
import asyncio
import functools
import logging
from types import FunctionType, ModuleType
from typing import Type
from prometheus_client import Histogram, Counter
logger = logging.getLogger(__name__)
H = Histogram(f"management_layer_call_duration_seconds", "API call duration (s)",
["call"])
# MASKED: _prometheus_module_metric_decorator function (lines 15-34)
def _prometheus_class_metric_decorator(f: FunctionType):
"""
A Prometheus decorator adding timing metrics to a function in a class.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
"""
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=f.__name__).time():
if asyncio.iscoroutinefunction(f):
return await f(*args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
def add_prometheus_metrics_for_module(module_: ModuleType):
"""
Convenience function applying the Prometheus metrics decorator to the
specified module's functions.
:param module_: The module to which the instrumentation will be applied
"""
decorate_all_in_module(module_, _prometheus_module_metric_decorator, [])
def add_prometheus_metrics_for_class(klass: Type):
"""
Convenience function applying the Prometheus metrics decorator to the
specified class functions.
:param klass: The class to which the instrumentation will be applied
"""
decorate_all_in_class(klass, _prometheus_class_metric_decorator, [])
def decorate_all_in_module(module_: ModuleType, decorator: FunctionType, whitelist: list):
"""
Decorate all functions in a module with the specified decorator
:param module_: The module to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
"""
for name in dir(module_):
if name not in whitelist:
obj = getattr(module_, name)
if isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj):
# We only check functions that are defined in the module we
# specified. Some of the functions in the module may have been
# imported from other modules. These are ignored.
if obj.__module__ == module_.__name__:
logger.debug(f"Adding metrics to {module_}:{name}")
setattr(module_, name, decorator(obj))
else:
logger.debug(f"No metrics on {module_}:{name} because it belongs to another "
f"module")
else:
logger.debug(f"No metrics on {module_}:{name} because it is not a coroutine or "
f"function")
def decorate_all_in_class(klass: Type, decorator: FunctionType, whitelist: list):
"""
Decorate all functions in a class with the specified decorator
:param klass: The class to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
"""
for name in dir(klass):
if name not in whitelist:
obj = getattr(klass, name)
if isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj):
logger.debug(f"Adding metrics to {klass}:{name}")
setattr(klass, name, decorator(obj))
else:
logger.debug(f"No metrics on {klass}:{name} because it is not a coroutine or "
f"function")
|
def _prometheus_module_metric_decorator(f: FunctionType):
"""
A Prometheus decorator adding timing metrics to a function.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
"""
module_ = f.__module__.split(".")[-1]
call_key = "{}_{}".format(module_, f.__name__)
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=call_key).time():
if asyncio.iscoroutinefunction(f):
return await f(*args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
| 15 | 34 |
import asyncio
import functools
import logging
from types import FunctionType, ModuleType
from typing import Type
from prometheus_client import Histogram, Counter
logger = logging.getLogger(__name__)
H = Histogram(f"management_layer_call_duration_seconds", "API call duration (s)",
["call"])
def _prometheus_module_metric_decorator(f: FunctionType):
"""
A Prometheus decorator adding timing metrics to a function.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
"""
module_ = f.__module__.split(".")[-1]
call_key = "{}_{}".format(module_, f.__name__)
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=call_key).time():
if asyncio.iscoroutinefunction(f):
return await f(*args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
def _prometheus_class_metric_decorator(f: FunctionType):
"""
A Prometheus decorator adding timing metrics to a function in a class.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
"""
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=f.__name__).time():
if asyncio.iscoroutinefunction(f):
return await f(*args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
def add_prometheus_metrics_for_module(module_: ModuleType):
"""
Convenience function applying the Prometheus metrics decorator to the
specified module's functions.
:param module_: The module to which the instrumentation will be applied
"""
decorate_all_in_module(module_, _prometheus_module_metric_decorator, [])
def add_prometheus_metrics_for_class(klass: Type):
"""
Convenience function applying the Prometheus metrics decorator to the
specified class functions.
:param klass: The class to which the instrumentation will be applied
"""
decorate_all_in_class(klass, _prometheus_class_metric_decorator, [])
def decorate_all_in_module(module_: ModuleType, decorator: FunctionType, whitelist: list):
"""
Decorate all functions in a module with the specified decorator
:param module_: The module to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
"""
for name in dir(module_):
if name not in whitelist:
obj = getattr(module_, name)
if isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj):
# We only check functions that are defined in the module we
# specified. Some of the functions in the module may have been
# imported from other modules. These are ignored.
if obj.__module__ == module_.__name__:
logger.debug(f"Adding metrics to {module_}:{name}")
setattr(module_, name, decorator(obj))
else:
logger.debug(f"No metrics on {module_}:{name} because it belongs to another "
f"module")
else:
logger.debug(f"No metrics on {module_}:{name} because it is not a coroutine or "
f"function")
def decorate_all_in_class(klass: Type, decorator: FunctionType, whitelist: list):
"""
Decorate all functions in a class with the specified decorator
:param klass: The class to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
"""
for name in dir(klass):
if name not in whitelist:
obj = getattr(klass, name)
if isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj):
logger.debug(f"Adding metrics to {klass}:{name}")
setattr(klass, name, decorator(obj))
else:
logger.debug(f"No metrics on {klass}:{name} because it is not a coroutine or "
f"function")
|
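To show how the instrumentation helpers above are meant to be wired up, here is a minimal, hedged usage sketch. The module names `metrics` (where the helpers are assumed to live) and `my_handlers` (the module being instrumented), as well as `get_user`, are placeholders, not part of the original code.
# Hypothetical wiring sketch for the Prometheus helpers above.
import asyncio

import my_handlers  # placeholder: any module whose functions should be timed
from metrics import add_prometheus_metrics_for_module  # assumed location of the helpers

# Every function defined directly in my_handlers is wrapped so its duration is
# observed in the management_layer_call_duration_seconds histogram, labelled
# call="<module>_<function>".
add_prometheus_metrics_for_module(my_handlers)

async def main():
    # Note: previously synchronous functions must now be awaited.
    await my_handlers.get_user("some-id")  # placeholder function

asyncio.run(main())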
seconds_to_timelimit
|
Convert seconds into a Slurm-notation time limit for the ABINIT flag `--timelimit`.
:param seconds: time limit in seconds
:returns: Slurm-notation time limit (hours:minutes:seconds)
|
# -*- coding: utf-8 -*-
"""Utilities for calculation job resources."""
__all__ = (
'get_default_options',
'seconds_to_timelimit',
)
def get_default_options(max_num_machines: int = 1, max_wallclock_seconds: int = 1800, with_mpi: bool = False) -> dict:
"""Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.
:param max_num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param with_mpi: whether to run the calculation with MPI enabled
"""
return {
'resources': {
'num_machines': int(max_num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': with_mpi,
}
# MASKED: seconds_to_timelimit function (lines 26-44)
|
def seconds_to_timelimit(seconds: int) -> str:
"""Convert seconds into a Slum-notation time limit for the ABINIT flag `--timelimit`.
:param seconds: time limit in seconds
:returns: Slurm-notation time limit (hours:minutes:seconds)
"""
days = seconds // 86400
seconds -= days * 86400
hours = seconds // 3600
seconds -= hours * 3600
minutes = seconds // 60
seconds -= minutes * 60
timelimit = ''
if days > 0:
timelimit += f'{days}-'
if hours > 0:
timelimit += f'{hours:02d}:'
timelimit += f'{minutes:02d}:{seconds:02d}'
return timelimit
| 26 | 44 |
# -*- coding: utf-8 -*-
"""Utilities for calculation job resources."""
__all__ = (
'get_default_options',
'seconds_to_timelimit',
)
def get_default_options(max_num_machines: int = 1, max_wallclock_seconds: int = 1800, with_mpi: bool = False) -> dict:
"""Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.
:param max_num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param with_mpi: whether to run the calculation with MPI enabled
"""
return {
'resources': {
'num_machines': int(max_num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': with_mpi,
}
def seconds_to_timelimit(seconds: int) -> str:
"""Convert seconds into a Slum-notation time limit for the ABINIT flag `--timelimit`.
:param seconds: time limit in seconds
:returns: Slurm-notation time limit (hours:minutes:seconds)
"""
days = seconds // 86400
seconds -= days * 86400
hours = seconds // 3600
seconds -= hours * 3600
minutes = seconds // 60
seconds -= minutes * 60
timelimit = ''
if days > 0:
timelimit += f'{days}-'
if hours > 0:
timelimit += f'{hours:02d}:'
timelimit += f'{minutes:02d}:{seconds:02d}'
return timelimit
|
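A few hand-checked values for `seconds_to_timelimit`, following the 86400/3600/60 arithmetic in the implementation above (plain assertions, nothing project-specific assumed):
# Hand-checked examples of the Slurm-notation conversion implemented above.
assert seconds_to_timelimit(90) == "01:30"          # 1 minute, 30 seconds
assert seconds_to_timelimit(1800) == "30:00"        # the default max_wallclock_seconds
assert seconds_to_timelimit(3661) == "01:01:01"     # 1 hour, 1 minute, 1 second
assert seconds_to_timelimit(90061) == "1-01:01:01"  # 1 day, 1 hour, 1 minute, 1 second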
__init__
|
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human-readable description of the field
(e.g. "The IP or domain name of the database server")
required_on_create -- If "true", the parameter is required on input stanza creation.
required_on_edit -- If "true", the parameter is required on input stanza modification.
Default values for required_on_create and required_on_edit match the
documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
|
import json
import re
class FieldValidationException(Exception):
pass
class Field(object):
"""
This is the base class that should be used to create field validators. Sub-class this and override to_python if you
need custom validation.
"""
DATA_TYPE_STRING = 'string'
DATA_TYPE_NUMBER = 'number'
DATA_TYPE_BOOLEAN = 'boolean'
def get_data_type(self):
"""
Get the type of the field.
"""
return Field.DATA_TYPE_STRING
# MASKED: __init__ function (lines 26-57)
def to_python(self, value):
"""
Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid.
Arguments:
value -- The value to convert
"""
# No standard validation here; the modular input framework handles empty values.
return value
def to_string(self, value):
"""
Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is
invalid.
Arguments:
value -- The value to convert
"""
return str(value)
class BooleanField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value in [True, False]:
return value
elif str(value).strip().lower() in ["true", "t", "1"]:
return True
elif str(value).strip().lower() in ["false", "f", "0"]:
return False
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid boolean" % (str(value), self.name))
def to_string(self, value):
if value is True:
return "1"
elif value is False:
return "0"
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_BOOLEAN
class DelimitedField(Field):
def __init__(self, name, title, description, delim, required_on_create=True, required_on_edit=False):
super(DelimitedField, self).__init__(name, title, description, required_on_create, required_on_edit)
self._delim = delim
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
tmp = value.split(self._delim)
return tmp
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_STRING
class DurationField(Field):
"""
    The duration field represents a duration expressed as a string, such as 1d for a 24 hour period.
The string is converted to an integer indicating the number of seconds.
"""
DURATION_RE = re.compile(r"(?P<duration>[0-9]+)\s*(?P<units>[a-z]*)", re.IGNORECASE)
MINUTE = 60
HOUR = 3600
DAY = 86400
WEEK = 604800
UNITS = {
'w': WEEK, 'week': WEEK, 'd': DAY, 'day': DAY, 'h': HOUR, 'hour': HOUR, 'm': MINUTE, 'min': MINUTE, 'minute':
MINUTE, 's': 1}
def to_python(self, value):
Field.to_python(self, value)
# Parse the duration
m = DurationField.DURATION_RE.match(value)
# Make sure the duration could be parsed
if m is None:
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid duration" % (str(value), self.name))
# Get the units and duration
d = m.groupdict()
units = d['units']
# Parse the value provided
try:
duration = int(d['duration'])
except ValueError:
raise FieldValidationException(
"The duration '%s' for the '%s' parameter is not a valid number" % (d['duration'], self.name))
# Make sure the units are valid
if len(units) > 0 and units not in DurationField.UNITS:
raise FieldValidationException(
"The unit '%s' for the '%s' parameter is not a valid unit of duration" % (units, self.name))
# Convert the units to seconds
if len(units) > 0:
return duration * DurationField.UNITS[units]
else:
return duration
def to_string(self, value):
return str(value)
class FloatField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return float(value)
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class IntegerField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return int(value)
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class IntervalField(Field):
'''Class for handling Splunk's "interval" field, which typically accepts
an integer value OR a cron-style string. Note that this means that the
data type returned is a string, so the modular input must handle conversion
of this string to an integer at runtime.'''
# Accepted cron field formats:
# Asterisk: * (equivalent to first-last range)
# Lists: 1,2,3,4,5
# Ranges: 1-60
#
# and combinations of the above:
#
# Ranges followed by steps: 0-23/2
# Asterisks followed by steps: */2
#
# Note that we don't check explicitly for correct numeric values for each
# cron field.
cron_rx = re.compile(
r'''
(
\d{1,2} # A digit.
|\d{1,2}-\d{1,2} # A range.
|(\d{1,2},)+\d{1,2} # A list of digits.
|\d{1,2}-\d{1,2}/\d{1,2} # A range followed by a step.
|\* # The asterisk character.
|\*/\d{1,2} # An asterisk followed by a step.
)
''', re.VERBOSE)
def to_python(self, value):
try:
# Try parsing the string as an integer.
return int(value)
except ValueError:
# Try parsing the string as a cron schedule.
if self.parse_cron(value):
return value
raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
value, self.name))
def get_data_type(self):
return Field.DATA_TYPE_STRING
def parse_cron(self, value):
'''Check for valid cron string.'''
fields = value.split()
if len(fields) == 5 and all([self.cron_rx.match(i) for i in fields]):
return True
return False
class JsonField(Field):
def to_python(self, value):
Field.to_python(self, value)
try:
return json.loads(value)
except (TypeError, ValueError):
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid JSON object" % (str(value), self.name))
def to_string(self, value):
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_STRING
class ListField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
return value.split(",")
else:
return []
def to_string(self, value):
if value is not None:
return ",".join(value)
return ""
class RangeField(Field):
def __init__(self, name, title, description, low, high, required_on_create=True, required_on_edit=False):
super(RangeField, self).__init__(name, title, description, required_on_create, required_on_edit)
self.low = low
self.high = high
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
tmp = int(value)
if tmp >= self.low and tmp <= self.high:
return tmp
else:
raise FieldValidationException("Value out of range.")
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class RegexField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return re.compile(value)
except Exception as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return value.pattern
return ""
class SeverityField(Field):
# Note: We ignore "FATAL" severity since Python's logging assigns it the
# same value as "CRITICAL".
SEVERITIES = {'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'ERROR': 40, 'CRITICAL': 50}
SEVERITIES_BY_INT = {v: k for k, v in SEVERITIES.items()}
def to_python(self, value):
try:
if value in SeverityField.SEVERITIES:
return SeverityField.SEVERITIES[value]
except AttributeError:
# Did not receive a string for some reason.
pass
raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
value, self.name))
def to_string(self, value):
if value in SeverityField.SEVERITIES_BY_INT:
return SeverityField.SEVERITIES_BY_INT[value]
else:
raise ValueError('Invalid value provided for severity.')
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class VerbosityField(Field):
def to_python(self, value):
Field.to_python(self, value)
value = int(value)
if value is not None:
if value in [10, 20, 30, 40, 50]:
return value
else:
raise FieldValidationException('Invalid value provided for verbosity, must be one of the following: ' +
'{10, 20, 30, 40, 50}')
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
|
def __init__(self, name, title, description, required_on_create=True, required_on_edit=False):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human-readable description of the field
(e.g. "The IP or domain name of the database server")
required_on_create -- If "true", the parameter is required on input stanza creation.
required_on_edit -- If "true", the parameter is required on input stanza modification.
Default values for required_on_create and required_on_edit match the
documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
"""
        # Note: there is no distinction between a None value and a blank value,
        # as modular input UIs do not recognize such a distinction.
if name is None or len(name.strip()) == 0:
raise ValueError("The name parameter cannot be empty.")
if title is None or len(title.strip()) == 0:
raise ValueError("The title parameter cannot be empty.")
if description is None or len(description.strip()) == 0:
raise ValueError("The description parameter cannot be empty.")
self.name = name
self.title = title
self.description = description
self.required_on_create = required_on_create
self.required_on_edit = required_on_edit
| 26 | 57 |
import json
import re
class FieldValidationException(Exception):
pass
class Field(object):
"""
This is the base class that should be used to create field validators. Sub-class this and override to_python if you
need custom validation.
"""
DATA_TYPE_STRING = 'string'
DATA_TYPE_NUMBER = 'number'
DATA_TYPE_BOOLEAN = 'boolean'
def get_data_type(self):
"""
Get the type of the field.
"""
return Field.DATA_TYPE_STRING
def __init__(self, name, title, description, required_on_create=True, required_on_edit=False):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human-readable description of the field
(e.g. "The IP or domain name of the database server")
required_on_create -- If "true", the parameter is required on input stanza creation.
required_on_edit -- If "true", the parameter is required on input stanza modification.
Default values for required_on_create and required_on_edit match the
documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
"""
        # Note: there is no distinction between a None value and a blank value,
        # as modular input UIs do not recognize such a distinction.
if name is None or len(name.strip()) == 0:
raise ValueError("The name parameter cannot be empty.")
if title is None or len(title.strip()) == 0:
raise ValueError("The title parameter cannot be empty.")
if description is None or len(description.strip()) == 0:
raise ValueError("The description parameter cannot be empty.")
self.name = name
self.title = title
self.description = description
self.required_on_create = required_on_create
self.required_on_edit = required_on_edit
def to_python(self, value):
"""
Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid.
Arguments:
value -- The value to convert
"""
# No standard validation here; the modular input framework handles empty values.
return value
def to_string(self, value):
"""
Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is
invalid.
Arguments:
value -- The value to convert
"""
return str(value)
class BooleanField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value in [True, False]:
return value
elif str(value).strip().lower() in ["true", "t", "1"]:
return True
elif str(value).strip().lower() in ["false", "f", "0"]:
return False
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid boolean" % (str(value), self.name))
def to_string(self, value):
if value is True:
return "1"
elif value is False:
return "0"
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_BOOLEAN
class DelimitedField(Field):
def __init__(self, name, title, description, delim, required_on_create=True, required_on_edit=False):
super(DelimitedField, self).__init__(name, title, description, required_on_create, required_on_edit)
self._delim = delim
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
tmp = value.split(self._delim)
return tmp
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_STRING
class DurationField(Field):
"""
    The duration field represents a duration expressed as a string, such as 1d for a 24 hour period.
The string is converted to an integer indicating the number of seconds.
"""
DURATION_RE = re.compile(r"(?P<duration>[0-9]+)\s*(?P<units>[a-z]*)", re.IGNORECASE)
MINUTE = 60
HOUR = 3600
DAY = 86400
WEEK = 604800
UNITS = {
'w': WEEK, 'week': WEEK, 'd': DAY, 'day': DAY, 'h': HOUR, 'hour': HOUR, 'm': MINUTE, 'min': MINUTE, 'minute':
MINUTE, 's': 1}
def to_python(self, value):
Field.to_python(self, value)
# Parse the duration
m = DurationField.DURATION_RE.match(value)
# Make sure the duration could be parsed
if m is None:
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid duration" % (str(value), self.name))
# Get the units and duration
d = m.groupdict()
units = d['units']
# Parse the value provided
try:
duration = int(d['duration'])
except ValueError:
raise FieldValidationException(
"The duration '%s' for the '%s' parameter is not a valid number" % (d['duration'], self.name))
# Make sure the units are valid
if len(units) > 0 and units not in DurationField.UNITS:
raise FieldValidationException(
"The unit '%s' for the '%s' parameter is not a valid unit of duration" % (units, self.name))
# Convert the units to seconds
if len(units) > 0:
return duration * DurationField.UNITS[units]
else:
return duration
def to_string(self, value):
return str(value)
class FloatField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return float(value)
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class IntegerField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return int(value)
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class IntervalField(Field):
'''Class for handling Splunk's "interval" field, which typically accepts
an integer value OR a cron-style string. Note that this means that the
data type returned is a string, so the modular input must handle conversion
of this string to an integer at runtime.'''
# Accepted cron field formats:
# Asterisk: * (equivalent to first-last range)
# Lists: 1,2,3,4,5
# Ranges: 1-60
#
# and combinations of the above:
#
# Ranges followed by steps: 0-23/2
# Asterisks followed by steps: */2
#
# Note that we don't check explicitly for correct numeric values for each
# cron field.
cron_rx = re.compile(
r'''
(
\d{1,2} # A digit.
|\d{1,2}-\d{1,2} # A range.
|(\d{1,2},)+\d{1,2} # A list of digits.
|\d{1,2}-\d{1,2}/\d{1,2} # A range followed by a step.
|\* # The asterisk character.
|\*/\d{1,2} # An asterisk followed by a step.
)
''', re.VERBOSE)
def to_python(self, value):
try:
# Try parsing the string as an integer.
return int(value)
except ValueError:
# Try parsing the string as a cron schedule.
if self.parse_cron(value):
return value
raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
value, self.name))
def get_data_type(self):
return Field.DATA_TYPE_STRING
def parse_cron(self, value):
'''Check for valid cron string.'''
fields = value.split()
if len(fields) == 5 and all([self.cron_rx.match(i) for i in fields]):
return True
return False
class JsonField(Field):
def to_python(self, value):
Field.to_python(self, value)
try:
return json.loads(value)
except (TypeError, ValueError):
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid JSON object" % (str(value), self.name))
def to_string(self, value):
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_STRING
class ListField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
return value.split(",")
else:
return []
def to_string(self, value):
if value is not None:
return ",".join(value)
return ""
class RangeField(Field):
def __init__(self, name, title, description, low, high, required_on_create=True, required_on_edit=False):
super(RangeField, self).__init__(name, title, description, required_on_create, required_on_edit)
self.low = low
self.high = high
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
tmp = int(value)
if tmp >= self.low and tmp <= self.high:
return tmp
else:
raise FieldValidationException("Value out of range.")
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class RegexField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return re.compile(value)
except Exception as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return value.pattern
return ""
class SeverityField(Field):
# Note: We ignore "FATAL" severity since Python's logging assigns it the
# same value as "CRITICAL".
SEVERITIES = {'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'ERROR': 40, 'CRITICAL': 50}
SEVERITIES_BY_INT = {v: k for k, v in SEVERITIES.items()}
def to_python(self, value):
try:
if value in SeverityField.SEVERITIES:
return SeverityField.SEVERITIES[value]
except AttributeError:
# Did not receive a string for some reason.
pass
raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
value, self.name))
def to_string(self, value):
if value in SeverityField.SEVERITIES_BY_INT:
return SeverityField.SEVERITIES_BY_INT[value]
else:
raise ValueError('Invalid value provided for severity.')
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class VerbosityField(Field):
def to_python(self, value):
Field.to_python(self, value)
value = int(value)
if value is not None:
if value in [10, 20, 30, 40, 50]:
return value
else:
raise FieldValidationException('Invalid value provided for verbosity, must be one of the following: ' +
'{10, 20, 30, 40, 50}')
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
|
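To illustrate how the validators above are used, here is a hedged sketch that defines two fields and runs values through them; the field names ("port", "interval") are invented for the example, and the classes are assumed to be importable from the module shown above.
# Hypothetical usage sketch of the field validators defined above
# (assumes RangeField, DurationField and FieldValidationException are in scope).
port_field = RangeField("port", "Port", "TCP port the input listens on",
                        low=1, high=65535)
interval_field = DurationField("interval", "Interval", "How often to poll the endpoint")

print(port_field.to_python("8089"))    # -> 8089 (integer inside the allowed range)
print(interval_field.to_python("5m"))  # -> 300 (duration converted to seconds)

try:
    port_field.to_python("99999")      # outside 1..65535
except FieldValidationException as err:
    print(err)                         # -> Value out of range.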
__init__
|
Parameters
----------
documents : iterable of iterable of str
    Iterable of documents. If given, they are used to initialize the dictionary.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
    Hash function; must support the interface `myhash(str) -> int`. `zlib.adler32` is used by default.
debug : bool
If True - store raw tokens mapping (as str <-> id).
If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens can be used (not only those already seen in documents), avoiding a typical
  limitation of :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
"""Encapsulates the mapping between normalized words and their integer ids.
Notes
-----
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
Documents can be converted to bag-of-words immediately, using an uninitialized
:class:`~gensim.corpora.hashdictionary.HashDictionary`, without seeing the rest of the corpus first.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = HashDictionary(texts)
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
# MASKED: __init__ function (lines 70-102)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all corresponding words.
"""
return self.id2token.get(tokenid, set())
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Build dictionary from a collection of documents.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
Notes
-----
Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : list of str
Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
allow_update : bool, optional
If True - update dictionary in the process.
return_missing : bool, optional
            Whether to also return counts for missing words. This has no effect for this class: with the hashing trick no word is ever missing, so an empty dict is always returned.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
list of (int, int), dict
If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.doc2bow(["this","is","máma"])
[(1721, 1), (5280, 1), (22493, 1)]
>>> dct.doc2bow(["this","is","máma"], return_missing=True)
([(1721, 1), (5280, 1), (22493, 1)], {})
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in iterkeys(result):
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(iteritems(result))
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
For tokens that appear in:
#. Less than `no_below` documents (absolute number) or \n
#. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything.
It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
>>> print dct.token2id
{'maso': 15025}
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in iteritems(self.id2token)
}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save this HashDictionary to a text file.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving HashDictionary mapping to %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
|
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str
            Iterable of documents. If given, they are used to initialize the dictionary.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
            Hash function; must support the interface `myhash(str) -> int`. `zlib.adler32` is used by default.
debug : bool
If True - store raw tokens mapping (as str <-> id).
If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
| 70 | 102 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens can be used (not only those already seen in documents), avoiding a typical
  limitation of :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
"""Encapsulates the mapping between normalized words and their integer ids.
Notes
-----
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
Documents can be converted to bag-of-words immediately, using an uninitialized
:class:`~gensim.corpora.hashdictionary.HashDictionary`, without seeing the rest of the corpus first.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = HashDictionary(texts)
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str
            Iterable of documents. If given, they are used to initialize the dictionary.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
            Hash function; must support the interface `myhash(str) -> int`. `zlib.adler32` is used by default.
debug : bool
If True - store raw tokens mapping (as str <-> id).
If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all corresponding words.
"""
return self.id2token.get(tokenid, set())
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Build dictionary from a collection of documents.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
Notes
-----
Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : list of str
Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
allow_update : bool, optional
If True - update dictionary in the process.
return_missing : bool, optional
            Whether to also return counts for missing words. This has no effect for this class: with the hashing trick no word is ever missing, so an empty dict is always returned.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
list of (int, int), dict
If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.doc2bow(["this","is","máma"])
[(1721, 1), (5280, 1), (22493, 1)]
>>> dct.doc2bow(["this","is","máma"], return_missing=True)
([(1721, 1), (5280, 1), (22493, 1)], {})
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in iterkeys(result):
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(iteritems(result))
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
For tokens that appear in:
#. Less than `no_below` documents (absolute number) or \n
#. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything.
It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
>>> print dct.token2id
{'maso': 15025}
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in iteritems(self.id2token)
}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save this HashDictionary to a text file.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving HashDictionary mapping to %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
|
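A brief, hedged sketch of the hashing behaviour described above: ids are assigned on the fly as `adler32(token) % id_range`, so no training pass over the corpus is needed. The exact id values depend on the hash function and `id_range`, so they are only illustrated, not asserted literally.
# Illustrative sketch of the fixed-range "hashing trick" implemented above.
import zlib

from gensim.corpora import HashDictionary

dct = HashDictionary(id_range=32000, debug=True)
bow = dct.doc2bow(["human", "interface", "computer"], allow_update=True)

# With debug=True the raw token mapping is kept, so we can check that every id
# is simply adler32(utf-8 token) % id_range, mirroring restricted_hash().
for token in ["human", "interface", "computer"]:
    assert dct.token2id[token] == zlib.adler32(token.encode("utf-8")) % 32000

print(bow)  # e.g. [(10608, 1), (12466, 1), (31002, 1)]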
restricted_hash
|
Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens can be used (not only those already seen in documents), avoiding a typical
  limitation of :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
"""Encapsulates the mapping between normalized words and their integer ids.
Notes
-----
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
Documents can be converted to bag-of-words immediately, using an uninitialized
:class:`~gensim.corpora.hashdictionary.HashDictionary`, without seeing the rest of the corpus first.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = HashDictionary(texts)
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str
            Iterable of documents. If given, they are used to initialize the dictionary.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
            Hash function; must support the interface `myhash(str) -> int`. `zlib.adler32` is used by default.
debug : bool
If True - store raw tokens mapping (as str <-> id).
If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all corresponding words.
"""
return self.id2token.get(tokenid, set())
# MASKED: restricted_hash function (lines 124-143)
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Build dictionary from a collection of documents.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
Notes
-----
Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : list of str
Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
allow_update : bool, optional
If True - update dictionary in the process.
return_missing : bool, optional
            Whether to also return counts for missing words. This has no effect for this class: with the hashing trick no word is ever missing, so an empty dict is always returned.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
list of (int, int), dict
If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.doc2bow(["this","is","máma"])
[(1721, 1), (5280, 1), (22493, 1)]
>>> dct.doc2bow(["this","is","máma"], return_missing=True)
([(1721, 1), (5280, 1), (22493, 1)], {})
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in iterkeys(result):
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(iteritems(result))
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
        This filters out tokens that appear in:
        #. Less than `no_below` documents (absolute number), or
        #. More than `no_above` documents (fraction of the total corpus size, **not an absolute number**).
        After (1) and (2), only the first `keep_n` most frequent tokens are kept (or all, if `keep_n=None`).
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything.
It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
        >>> print(dct.token2id)
{'maso': 15025}
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in iteritems(self.id2token)
}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save this HashDictionary to a text file.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving HashDictionary mapping to %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
|
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
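# Editor's sketch (standalone illustration, not part of the original module): the id computed
# above is simply `hash(utf-8 token) % id_range`, so it always lands in a fixed range no matter
# how large the vocabulary grows. Assuming the default `zlib.adler32` hash and `id_range=32000`:
import zlib

id_range = 32000
token_id = zlib.adler32("máma".encode("utf-8")) % id_range
assert 0 <= token_id < id_range  # ids never leave the fixed range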
| 124 | 143 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens can be represented, not only the ones already seen in documents (a typical limitation
  of :class:`~gensim.corpora.dictionary.Dictionary`).
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
"""Encapsulates the mapping between normalized words and their integer ids.
Notes
-----
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
The documents can be computed immediately, from an uninitialized
:class:`~gensim.corpora.hashdictionary.HashDictionary` without seeing the rest of the corpus first.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = HashDictionary(texts)
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str
            Iterable of documents; if given, used for initialization.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
            Hash function; must support the interface `myhash(str) -> int`. Defaults to `zlib.adler32`.
debug : bool
            If True, store the raw token mapping (str <-> id).
            If you find yourself running out of memory (or are not sure you really need the raw tokens), set `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all corresponding words.
"""
return self.id2token.get(tokenid, set())
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Build dictionary from a collection of documents.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
Notes
-----
Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
        is done on the words in `document`; apply tokenization, stemming etc. before calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : list of str
            A list of tokens: **tokenized and normalized** strings (either utf8 or unicode).
        allow_update : bool, optional
            If True, update the dictionary in the process.
        return_missing : bool, optional
            Return the count of missing words as well. Irrelevant for this class: with the hashing trick
            no word is ever missing, so the returned dict is always empty.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
list of (int, int), dict
If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.doc2bow(["this","is","máma"])
[(1721, 1), (5280, 1), (22493, 1)]
>>> dct.doc2bow(["this","is","máma"], return_missing=True)
([(1721, 1), (5280, 1), (22493, 1)], {})
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in iterkeys(result):
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(iteritems(result))
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
        This filters out tokens that appear in:
        #. Less than `no_below` documents (absolute number), or
        #. More than `no_above` documents (fraction of the total corpus size, **not an absolute number**).
        After (1) and (2), only the first `keep_n` most frequent tokens are kept (or all, if `keep_n=None`).
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything.
It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
        >>> print(dct.token2id)
{'maso': 15025}
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in iteritems(self.id2token)
}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save this HashDictionary to a text file.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving HashDictionary mapping to %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
|
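The module docstring above warns that distinct words may hash to the same id. A minimal standalone sketch of how such collisions can be surfaced, using a deliberately tiny id range (the range and the word list are illustrative, not taken from the file):
import zlib
from collections import defaultdict

id_range = 100  # deliberately small so that collisions become likely
buckets = defaultdict(set)
for word in ["human", "interface", "computer", "survey", "system", "response", "time"]:
    buckets[zlib.adler32(word.encode("utf-8")) % id_range].add(word)
collisions = {h: words for h, words in buckets.items() if len(words) > 1}
print(collisions)  # any bucket holding more than one word is a hash collision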
apply_at
|
Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> first_sqr = apply_at(0, np.square)
>>> first_sqr([3, 2, 1])
>>> (9, 2, 1)
|
from typing import Callable, Iterable, Sequence
import numpy as np
from dpipe.im.axes import AxesLike, AxesParams
from dpipe.itertools import lmap, squeeze_first
from dpipe.im import pad_to_shape
def pad_batch_equal(batch, padding_values: AxesParams = 0, ratio: AxesParams = 0.5):
"""
Pad each element of ``batch`` to obtain a correctly shaped array.
References
----------
`pad_to_shape`
"""
max_shapes = np.max(lmap(np.shape, batch), axis=0)
# if not scalars
if max_shapes.size != 0:
batch = [pad_to_shape(x, max_shapes, padding_values=padding_values, ratio=ratio) for x in batch]
return np.array(batch)
def unpack_args(func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and unpacks it while calling ``func``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> def add(x, y):
>>> return x + y
>>>
>>> add_ = unpack_args(add)
>>> add(1, 2) == add_([1, 2])
>>> True
"""
def wrapper(xs, *args_, **kwargs_):
return func(*xs, *args_, *args, **kwargs_, **kwargs)
return wrapper
def multiply(func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and maps ``func`` over it.
Useful when multiple batches require the same function.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
"""
def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple:
return tuple(func(x, *args_, *args, **kwargs_, **kwargs) for x in xs)
return wrapped
# MASKED: apply_at function (lines 61-83)
def zip_apply(*functions: Callable, **kwargs):
"""
Returns a function that takes an iterable and zips ``functions`` over it.
``kwargs`` are passed to each function as additional arguments.
Examples
--------
>>> zipper = zip_apply(np.square, np.sqrt)
>>> zipper([4, 9])
>>> (16, 3)
"""
def wrapped(xs: Sequence, *args, **kwargs_) -> tuple:
return tuple(func(x, *args, **kwargs_, **kwargs) for func, x in zip(functions, xs))
return wrapped
def random_apply(p: float, func: Callable, *args, **kwargs):
"""
Returns a function that applies ``func`` with a given probability ``p``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
"""
def wrapped(*args_, **kwargs_):
if np.random.binomial(1, p):
return func(*args_, *args, **kwargs_, **kwargs)
return squeeze_first(args_)
return wrapped
def sample_args(func: Callable, *args: Callable, **kwargs: Callable):
"""
Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.
Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.
Examples
--------
>>> from scipy.ndimage import rotate
>>>
>>> random_rotate = sample_args(rotate, angle=np.random.normal)
>>> random_rotate(x)
>>> # same as
>>> rotate(x, angle=np.random.normal())
"""
def wrapped(*args_, **kwargs_):
return func(*args_, *([arg() for arg in args]), **kwargs_, **{name: arg() for name, arg in kwargs.items()})
return wrapped
|
def apply_at(index: AxesLike, func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> first_sqr = apply_at(0, np.square)
>>> first_sqr([3, 2, 1])
>>> (9, 2, 1)
"""
index = set(np.atleast_1d(index).tolist())
def wrapped(xs: Sequence, *args_, **kwargs_) -> tuple:
index_ = {i + len(xs) if i < 0 else i for i in index}
for idx in index_:
if idx < 0 or idx >= len(xs):
raise IndexError(f'Index {idx} out of bounds.')
return tuple(func(x, *args_, *args, **kwargs_, **kwargs) if i in index_ else x for i, x in enumerate(xs))
return wrapped
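# Editor's sketch (standalone, assuming `apply_at` as defined above plus numpy): negative
# indices are resolved against the length of the incoming sequence at call time, and an
# out-of-range index raises IndexError instead of being silently ignored.
import numpy as np

last_sqr = apply_at(-1, np.square)
print(last_sqr([3, 2, 4]))   # -> (3, 2, 16): only the last element is squared
try:
    last_sqr([])             # -1 cannot be resolved for an empty sequence
except IndexError as exc:
    print(exc)               # 'Index -1 out of bounds.'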
| 61 | 83 |
from typing import Callable, Iterable, Sequence
import numpy as np
from dpipe.im.axes import AxesLike, AxesParams
from dpipe.itertools import lmap, squeeze_first
from dpipe.im import pad_to_shape
def pad_batch_equal(batch, padding_values: AxesParams = 0, ratio: AxesParams = 0.5):
"""
Pad each element of ``batch`` to obtain a correctly shaped array.
References
----------
`pad_to_shape`
"""
max_shapes = np.max(lmap(np.shape, batch), axis=0)
# if not scalars
if max_shapes.size != 0:
batch = [pad_to_shape(x, max_shapes, padding_values=padding_values, ratio=ratio) for x in batch]
return np.array(batch)
def unpack_args(func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and unpacks it while calling ``func``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> def add(x, y):
>>> return x + y
>>>
>>> add_ = unpack_args(add)
>>> add(1, 2) == add_([1, 2])
>>> True
"""
def wrapper(xs, *args_, **kwargs_):
return func(*xs, *args_, *args, **kwargs_, **kwargs)
return wrapper
def multiply(func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and maps ``func`` over it.
Useful when multiple batches require the same function.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
"""
def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple:
return tuple(func(x, *args_, *args, **kwargs_, **kwargs) for x in xs)
return wrapped
def apply_at(index: AxesLike, func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> first_sqr = apply_at(0, np.square)
>>> first_sqr([3, 2, 1])
>>> (9, 2, 1)
"""
index = set(np.atleast_1d(index).tolist())
def wrapped(xs: Sequence, *args_, **kwargs_) -> tuple:
index_ = {i + len(xs) if i < 0 else i for i in index}
for idx in index_:
if idx < 0 or idx >= len(xs):
raise IndexError(f'Index {idx} out of bounds.')
return tuple(func(x, *args_, *args, **kwargs_, **kwargs) if i in index_ else x for i, x in enumerate(xs))
return wrapped
def zip_apply(*functions: Callable, **kwargs):
"""
Returns a function that takes an iterable and zips ``functions`` over it.
``kwargs`` are passed to each function as additional arguments.
Examples
--------
>>> zipper = zip_apply(np.square, np.sqrt)
>>> zipper([4, 9])
>>> (16, 3)
"""
def wrapped(xs: Sequence, *args, **kwargs_) -> tuple:
return tuple(func(x, *args, **kwargs_, **kwargs) for func, x in zip(functions, xs))
return wrapped
def random_apply(p: float, func: Callable, *args, **kwargs):
"""
Returns a function that applies ``func`` with a given probability ``p``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
"""
def wrapped(*args_, **kwargs_):
if np.random.binomial(1, p):
return func(*args_, *args, **kwargs_, **kwargs)
return squeeze_first(args_)
return wrapped
def sample_args(func: Callable, *args: Callable, **kwargs: Callable):
"""
Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.
Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.
Examples
--------
>>> from scipy.ndimage import rotate
>>>
>>> random_rotate = sample_args(rotate, angle=np.random.normal)
>>> random_rotate(x)
>>> # same as
>>> rotate(x, angle=np.random.normal())
"""
def wrapped(*args_, **kwargs_):
return func(*args_, *([arg() for arg in args]), **kwargs_, **{name: arg() for name, arg in kwargs.items()})
return wrapped
|
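Taken together, these wrappers are meant to be composed into small batch-transformation pipelines. A minimal sketch, assuming `random_apply` and `sample_args` as defined in the file above plus numpy (the probability, offset distribution and toy data are illustrative):
import numpy as np

maybe_flip = random_apply(0.5, np.flip)                         # reverse an array with probability 0.5
jitter = sample_args(np.add, lambda: np.random.uniform(-1, 1))  # add a freshly sampled offset on every call
x = np.arange(4)
print(maybe_flip(x))   # either [0 1 2 3] (left untouched) or [3 2 1 0]
print(jitter(10.0))    # 10 plus a random offset drawn from (-1, 1)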
get_valid_arguments
|
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
        click.option(... type=PluginParamType(group='aiida.calculations'))
    or::
        click.option(... type=PluginParamType(group=('calculations', 'data')))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
        is specified. This will happen if one or more groups share an entry point with a common name.
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
# MASKED: get_valid_arguments function (lines 102-114)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
                raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
            raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
|
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
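# Editor's sketch (standalone, not the aiida implementation): the same disambiguation rule
# applied to plain data. The ':' separator mirrors ENTRY_POINT_STRING_SEPARATOR; the group
# names are real aiida groups, but the entry point names are made up for illustration.
entry_points = [("aiida.calculations", "add"), ("aiida.parsers", "add"), ("aiida.parsers", "raw")]
names = [name for _, name in entry_points]
if len(names) != len(set(names)):  # mirrors has_potential_ambiguity
    valid = sorted("%s:%s" % (group, name) for group, name in entry_points)
else:
    valid = sorted(names)
print(valid)  # -> ['aiida.calculations:add', 'aiida.parsers:add', 'aiida.parsers:raw']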
| 102 | 114 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
        click.option(... type=PluginParamType(group='aiida.calculations'))
    or::
        click.option(... type=PluginParamType(group=('calculations', 'data')))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
        is specified. This will happen if one or more groups share an entry point with a common name.
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
                raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
            raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
|
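A hedged sketch of how this parameter type is typically wired into a click command. The import path, option name and entry point group below are assumptions for illustration, and actually running the command requires a configured AiiDA installation:
import click
from aiida.cmdline.params.types import PluginParamType  # assumed import location

@click.command()
@click.option('--calculation', 'plugin', type=PluginParamType(group='calculations'))
def show_plugin(plugin):
    """Echo the entry point selected on the command line."""
    click.echo(str(plugin))

if __name__ == '__main__':
    show_plugin()  # e.g. `show_plugin --calculation arithmetic.add`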
get_entry_point_from_string
|
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
        click.option(... type=PluginParamType(group='aiida.calculations'))
    or::
        click.option(... type=PluginParamType(group=('calculations', 'data')))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
        is specified. This will happen if one or more groups share an entry point with a common name.
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
# MASKED: get_entry_point_from_string function (lines 152-198)
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
|
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
                raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
            raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
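# Editor's sketch (standalone mimic, not the aiida implementation): how the three accepted
# string formats map onto (group, name) pairs under the rules implemented above. The entry
# point name 'arithmetic.add' is only an example.
def split_entry_point_string(value, prefix="aiida."):
    if ":" in value:
        group, name = value.split(":", 1)
        if not group.startswith(prefix):  # PARTIAL format: prepend the 'aiida.' prefix
            group = prefix + group
        return group, name                # FULL format: group and name given explicitly
    return None, value                    # MINIMAL format: group must be inferred from the name

print(split_entry_point_string("aiida.calculations:arithmetic.add"))  # ('aiida.calculations', 'arithmetic.add')
print(split_entry_point_string("calculations:arithmetic.add"))        # ('aiida.calculations', 'arithmetic.add')
print(split_entry_point_string("arithmetic.add"))                     # (None, 'arithmetic.add')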
| 152 | 198 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
        click.option(... type=PluginParamType(group='aiida.calculations'))
    or::
        click.option(... type=PluginParamType(group=('calculations', 'data')))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
        is specified. This will happen if one or more groups share an entry point with a common name.
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
                raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
            raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
|
convert
|
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
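A hedged sketch of the error-handling contract described here, with a stand-in resolver instead of the real entry point machinery (the helper name is invented; the real method delegates to get_entry_point_from_string and also supports load=True):
import click

def convert_like(value, resolver):
    # Illustrative stand-in: any validation failure is surfaced to click as BadParameter.
    if not value:
        raise click.BadParameter('plugin name cannot be empty')
    try:
        return resolver(value)  # stands in for self.get_entry_point_from_string(value)
    except ValueError as exc:
        raise click.BadParameter(str(exc))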
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
        click.option(... type=PluginParamType(group='aiida.calculations'))
    or::
        click.option(... type=PluginParamType(group=('calculations', 'data')))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
is specified. This will happen if two or more groups share an entry point with the same name
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParamType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
# MASKED: convert function (lines 200-220)
|
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
| 200 | 220 |
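A minimal usage sketch (not taken from the source) of how this parameter type could be wired into a click command; the command name, option name and entry point group are illustrative, and a configured AiiDA profile with matching plugins is assumed:

import click

@click.command()
@click.option('--calculation', 'calculation_cls',
              type=PluginParamType(group='aiida.calculations', load=True),
              help='Entry point name of a calculation plugin.')
def show_plugin(calculation_cls):
    """Print the class that the given entry point resolves to."""
    # With load=True the convert() shown above returns the loaded plugin class.
    click.echo('Loaded plugin class: {}'.format(calculation_cls))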
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
click.option(..., type=PluginParamType(group='aiida.calculations'))
or::
click.option(..., type=PluginParamType(group=('calculations', 'data')))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
is specified. This will happen if two or more groups share an entry point with the same name
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParamType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
|
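As a hypothetical illustration of the completion helpers defined above; the groups and the partial input are made up, and an AiiDA profile with installed plugins is assumed:

param_type = PluginParamType(group=('calculations', 'data'))

# Every accepted plugin string; names are prefixed with their full group only
# when the same entry point name exists in more than one configured group.
print(param_type.get_valid_arguments())

# Click-style shell completion: (value, description) tuples for a partial input.
print(param_type.complete(None, 'arith'))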
__init__
|
Initializes the GaussianSumQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
stddev: The stddev of the noise added to the sum.
|
# Copyright 2018, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements DPQuery interface for Gaussian average queries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.dp_query import dp_query
from tensorflow_privacy.privacy.dp_query import normalized_query
class GaussianSumQuery(dp_query.SumAggregationDPQuery):
"""Implements DPQuery interface for Gaussian sum queries.
Accumulates clipped vectors, then adds Gaussian noise to the sum.
"""
# pylint: disable=invalid-name
_GlobalState = collections.namedtuple(
'_GlobalState', ['l2_norm_clip', 'stddev'])
# MASKED: __init__ function (lines 41-51)
def set_ledger(self, ledger):
self._ledger = ledger
def make_global_state(self, l2_norm_clip, stddev):
"""Creates a global state from the given parameters."""
return self._GlobalState(tf.cast(l2_norm_clip, tf.float32),
tf.cast(stddev, tf.float32))
def initial_global_state(self):
return self.make_global_state(self._l2_norm_clip, self._stddev)
def derive_sample_params(self, global_state):
return global_state.l2_norm_clip
def initial_sample_state(self, template):
return tf.nest.map_structure(
dp_query.zeros_like, template)
def preprocess_record_impl(self, params, record):
"""Clips the l2 norm, returning the clipped record and the l2 norm.
Args:
params: The parameters for the sample.
record: The record to be processed.
Returns:
A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
the structure of preprocessed tensors, and l2_norm is the total l2 norm
before clipping.
"""
l2_norm_clip = params
record_as_list = tf.nest.flatten(record)
clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
return tf.nest.pack_sequence_as(record, clipped_as_list), norm
def preprocess_record(self, params, record):
preprocessed_record, _ = self.preprocess_record_impl(params, record)
return preprocessed_record
def get_noised_result(self, sample_state, global_state):
"""See base class."""
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
def add_noise(v):
return v + tf.random.normal(
tf.shape(input=v), stddev=global_state.stddev)
else:
random_normal = tf.random_normal_initializer(
stddev=global_state.stddev)
def add_noise(v):
return v + random_normal(tf.shape(input=v))
if self._ledger:
dependencies = [
self._ledger.record_sum_query(
global_state.l2_norm_clip, global_state.stddev)
]
else:
dependencies = []
with tf.control_dependencies(dependencies):
return tf.nest.map_structure(add_noise, sample_state), global_state
class GaussianAverageQuery(normalized_query.NormalizedQuery):
"""Implements DPQuery interface for Gaussian average queries.
Accumulates clipped vectors, adds Gaussian noise, and normalizes.
Note that we use "fixed-denominator" estimation: the denominator should be
specified as the expected number of records per sample. Accumulating the
denominator separately would also be possible but would produce a higher
variance estimator.
"""
def __init__(self,
l2_norm_clip,
sum_stddev,
denominator):
"""Initializes the GaussianAverageQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
sum_stddev: The stddev of the noise added to the sum (before
normalization).
denominator: The normalization constant (applied after noise is added to
the sum).
"""
super(GaussianAverageQuery, self).__init__(
numerator_query=GaussianSumQuery(l2_norm_clip, sum_stddev),
denominator=denominator)
|
def __init__(self, l2_norm_clip, stddev):
"""Initializes the GaussianSumQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
stddev: The stddev of the noise added to the sum.
"""
self._l2_norm_clip = l2_norm_clip
self._stddev = stddev
self._ledger = None
| 41 | 51 |
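A short, hypothetical sketch of the query life cycle implemented above, using a toy record whose global l2 norm (5.0) exceeds the clip (1.0); `accumulate_record` is assumed to come from the DPQuery base class, and eager execution is assumed:

import tensorflow.compat.v1 as tf

query = GaussianSumQuery(l2_norm_clip=1.0, stddev=0.5)
global_state = query.initial_global_state()
params = query.derive_sample_params(global_state)

record = [tf.constant([3.0, 4.0])]  # global l2 norm is 5.0, clipped down to 1.0
sample_state = query.initial_sample_state(record)
sample_state = query.accumulate_record(params, sample_state, record)

# Adds N(0, stddev^2) noise to each coordinate of the accumulated sum.
noised_sum, global_state = query.get_noised_result(sample_state, global_state)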
# Copyright 2018, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements DPQuery interface for Gaussian average queries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.dp_query import dp_query
from tensorflow_privacy.privacy.dp_query import normalized_query
class GaussianSumQuery(dp_query.SumAggregationDPQuery):
"""Implements DPQuery interface for Gaussian sum queries.
Accumulates clipped vectors, then adds Gaussian noise to the sum.
"""
# pylint: disable=invalid-name
_GlobalState = collections.namedtuple(
'_GlobalState', ['l2_norm_clip', 'stddev'])
def __init__(self, l2_norm_clip, stddev):
"""Initializes the GaussianSumQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
stddev: The stddev of the noise added to the sum.
"""
self._l2_norm_clip = l2_norm_clip
self._stddev = stddev
self._ledger = None
def set_ledger(self, ledger):
self._ledger = ledger
def make_global_state(self, l2_norm_clip, stddev):
"""Creates a global state from the given parameters."""
return self._GlobalState(tf.cast(l2_norm_clip, tf.float32),
tf.cast(stddev, tf.float32))
def initial_global_state(self):
return self.make_global_state(self._l2_norm_clip, self._stddev)
def derive_sample_params(self, global_state):
return global_state.l2_norm_clip
def initial_sample_state(self, template):
return tf.nest.map_structure(
dp_query.zeros_like, template)
def preprocess_record_impl(self, params, record):
"""Clips the l2 norm, returning the clipped record and the l2 norm.
Args:
params: The parameters for the sample.
record: The record to be processed.
Returns:
A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
the structure of preprocessed tensors, and l2_norm is the total l2 norm
before clipping.
"""
l2_norm_clip = params
record_as_list = tf.nest.flatten(record)
clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
return tf.nest.pack_sequence_as(record, clipped_as_list), norm
def preprocess_record(self, params, record):
preprocessed_record, _ = self.preprocess_record_impl(params, record)
return preprocessed_record
def get_noised_result(self, sample_state, global_state):
"""See base class."""
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
def add_noise(v):
return v + tf.random.normal(
tf.shape(input=v), stddev=global_state.stddev)
else:
random_normal = tf.random_normal_initializer(
stddev=global_state.stddev)
def add_noise(v):
return v + random_normal(tf.shape(input=v))
if self._ledger:
dependencies = [
self._ledger.record_sum_query(
global_state.l2_norm_clip, global_state.stddev)
]
else:
dependencies = []
with tf.control_dependencies(dependencies):
return tf.nest.map_structure(add_noise, sample_state), global_state
class GaussianAverageQuery(normalized_query.NormalizedQuery):
"""Implements DPQuery interface for Gaussian average queries.
Accumulates clipped vectors, adds Gaussian noise, and normalizes.
Note that we use "fixed-denominator" estimation: the denominator should be
specified as the expected number of records per sample. Accumulating the
denominator separately would also be possible but would produce a higher
variance estimator.
"""
def __init__(self,
l2_norm_clip,
sum_stddev,
denominator):
"""Initializes the GaussianAverageQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
sum_stddev: The stddev of the noise added to the sum (before
normalization).
denominator: The normalization constant (applied after noise is added to
the sum).
"""
super(GaussianAverageQuery, self).__init__(
numerator_query=GaussianSumQuery(l2_norm_clip, sum_stddev),
denominator=denominator)
|
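To illustrate the fixed-denominator averaging described in the class docstring, a minimal, hypothetical construction; the clip, noise and denominator values are arbitrary:

# Noise is added to the clipped sum by the inner GaussianSumQuery and the
# result is divided by the fixed, expected number of records per sample.
average_query = GaussianAverageQuery(
    l2_norm_clip=1.0,
    sum_stddev=0.7,     # stddev applied to the sum, before normalization
    denominator=16)
average_global_state = average_query.initial_global_state()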
forward
|
Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
|
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
# MASKED: forward function (lines 53-111)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
|
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
| 53 | 111 |
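A hypothetical training-step sketch around the forward() above, showing how the returned optim_idx selects between the generator and discriminator optimizers; the model, batch and optimizer objects are assumed to be created elsewhere:

def gan_train_step(model, batch, optimizers):
    # optimizers[0] updates the generator, optimizers[1] the discriminator.
    for forward_generator in (True, False):
        output = model(forward_generator=forward_generator, **batch)
        optimizer = optimizers[output["optim_idx"]]
        optimizer.zero_grad()
        output["loss"].backward()
        optimizer.step()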
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
|
collect_feats
|
Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
|
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
# MASKED: collect_feats function (lines 113-145)
|
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
| 113 | 145 |
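A small, hypothetical example of calling collect_feats above to gather feature statistics (e.g. for fitting a normalization layer); the model instance and the tensor shapes are illustrative:

import torch

def collect_feature_stats(model):
    # `model` is assumed to be an ESPnetGANTTSModel instance built elsewhere.
    feats_dict = model.collect_feats(
        text=torch.randint(0, 40, (2, 10)),
        text_lengths=torch.tensor([10, 8]),
        speech=torch.randn(2, 16000),
        speech_lengths=torch.tensor([16000, 12000]),
    )
    # Empty when no feats_extract module is configured; otherwise it contains
    # 'feats' and 'feats_lengths'.
    return feats_dict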
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
|
create_client
|
Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): [description]
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments and mark the variables to be exposed to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
# MASKED: create_client function (lines 322-399)
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none with protocal set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set ready signal once the whole env include env variables has
# been setup
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for reference to later
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
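A hypothetical plugin-style sketch built on the helpers above: the proxy is started in the background and the environment variables set by set_env() are consumed by a child process; the device, host and ssh user are placeholders:

import os
import subprocess

import click


@click.command()
@click.pass_context
def ssh_via_proxy(ctx):
    opts = ProxyContext(ctx, {"device": "my-device", "host": "example.cumulocity.com"})
    opts.start_background(ctx)
    # set_env() has exported C8Y_HOST, PORT and DEVICE for child processes.
    subprocess.run(
        ["ssh", "-p", os.environ["PORT"], "device_user@localhost"],
        check=False,
    )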
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
| 322 | 399 |
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
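# Usage note (illustrative, not part of the original module): these values
# surface as the c8ylp process exit status, so a shell can inspect them after
# the command returns, e.g. `echo $?` printing 2 corresponds to NO_SESSION
# (no valid Cumulocity session could be established).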
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
            src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
        be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
        # Support WSL environments and expose the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
            # reuse and reconfigure the existing console handler
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found among configs with protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
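# Illustrative example (not part of the original module): the rough shape of
# the managed object that get_config_id() above iterates over. The field
# names are taken from the lookups in the function; ids, names and port
# values are made up.
_EXAMPLE_REMOTE_ACCESS_MO = {
    "id": "12345",
    "name": "example-device",
    REMOTE_ACCESS_FRAGMENT: [
        {"id": "1", "name": "ssh", "protocol": PASSTHROUGH, "port": 22},
        {"id": "2", "name": "vnc", "protocol": "VNC", "port": 5900},
    ],
}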
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the user can run the
    # follow-up command/plugin while the proxy keeps serving connections
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # env variables, has been set up
if ready_signal:
ready_signal.set()
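# Illustrative usage sketch (not part of the original module): this mirrors
# how ProxyContext.start_background() above drives the function; ctx, opts
# and data are assumed to already exist.
#
#   ready = threading.Event()
#   run_proxy_in_background(ctx, opts, connection_data=data, ready_signal=ready)
#   if not ready.wait(opts.wait_port_timeout):
#       opts.exit_server_not_ready()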
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
        # store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
run_proxy_in_background
|
Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
            src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
        be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
        # Support WSL environments and expose the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
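# Usage note (illustrative, not part of the original module): the log location
# above can be redirected before c8ylp starts by setting the C8YLP_LOG_DIR
# environment variable, e.g.
#   os.environ["C8YLP_LOG_DIR"] = "/var/log/c8ylp"   # path is made up
#   CliLogger.log_path()  # -> /var/log/c8ylp/localproxy.log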
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
            # reuse and reconfigure the existing console handler
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
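# Illustrative usage sketch (not part of the original module):
# configure_logger() is normally driven from ProxyContext.__init__ above;
# calling it directly would look roughly like this (the path is made up):
#
#   log = configure_logger(pathlib.Path("/tmp/c8ylp-example.log"), verbose=True)
#   log.info("console and rotating-file logging configured")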
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found among configs with protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
# MASKED: run_proxy_in_background function (lines 493-547)
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
        # store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the user can run the
    # follow-up command/plugin while the proxy keeps serving connections
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # env variables, has been set up
if ready_signal:
ready_signal.set()
| 493 | 547 |
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
            src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
        be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
        # Support WSL environments and expose the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
            # reuse and reconfigure the existing console handler
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
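# Note (descriptive, not part of the original module): once register_signals()
# has run, a Ctrl-C (SIGINT) is translated by signal_handler() above into
# sys.exit(ExitCodes.TERMINATE), so the process exits with status 100 instead
# of surfacing a KeyboardInterrupt traceback.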
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found among configs with protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the user can run the
    # follow-up command/plugin while the proxy keeps serving connections
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
        # store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
start_proxy
|
Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
            src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for device "%s" was not found or does not have the protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run other commands (e.g. plugins) while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# environment variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
# MASKED: start_proxy function (lines 605-715)
|
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins run the proxy in a background thread, so skip these connection
# hints and let the plugin produce its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
| 605 | 715 |
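A minimal usage sketch (not part of the original row) of how the start_proxy implementation above can be driven with explicit ready/stop events, mirroring run_proxy_in_background; ctx, opts and connection_data are assumed to come from a click command and pre_start_checks.
import threading

stop_signal = threading.Event()
ready_signal = threading.Event()
worker = threading.Thread(
    target=start_proxy,
    args=(ctx, opts),
    kwargs=dict(
        connection_data=connection_data,
        stop_signal=stop_signal,
        ready_signal=ready_signal,
    ),
    daemon=True,
)
worker.start()
# start_proxy sets ready_signal once the TCP server accepts connections,
# and opts.used_port then holds the actual local port
if not ready_signal.wait(opts.wait_port_timeout):
    opts.exit_server_not_ready()
stop_signal.set()  # ask the proxy loop to stop
worker.join()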
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to load
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for device "%s" was not found or does not have the protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run other commands (e.g. plugins) while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# environment variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins run the proxy in a background thread, so skip these connection
# hints and let the plugin produce its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
start_background
|
Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
|
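A brief illustrative sketch of the method documented above; the command and option names below are assumptions, not part of the dataset row.
import click

@click.command()
@click.option("--host", required=True)
@click.option("--device", required=True)
def connect(host, device):
    ctx = click.get_current_context()
    # start_background() blocks until the local port is open and then
    # returns the ProxyContext so further calls can be chained
    proxy = ProxyContext(ctx, {"host": host, "device": device}).start_background()
    click.echo(f"Local proxy ready on localhost:{proxy.used_port}")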
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to load
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
# MASKED: start_background function (lines 152-167)
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for device "%s" was not found or does not have the protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run other commands (e.g. plugins) while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# environment variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins run the proxy in a background thread, so skip these connection
# hints and let the plugin produce its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
| 152 | 167 |
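For reference, an illustrative sketch (not from the original row) of how a plugin could consume the environment variables exported by ProxyContext.set_env once the proxy is running.
import os

host = os.environ["C8Y_HOST"]    # Cumulocity host URL
port = int(os.environ["PORT"])   # local proxy port chosen at runtime
device = os.environ["DEVICE"]    # external id of the target device
print(f"{device} is reachable via localhost:{port} (tenant host: {host})")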
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to load
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
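# Illustrative shape (example values only) of the managed object expected by
# get_config_id; entries whose protocol is not PASSTHROUGH are ignored:
#
#   mor = {
#       "id": "12345",
#       "name": "my-device",
#       "c8y_RemoteAccessList": [
#           {"id": "1", "name": "ssh", "protocol": "PASSTHROUGH", "port": 22},
#           {"id": "2", "name": "vnc", "protocol": "VNC", "port": 5900},
#       ],
#   }
#   config_id = get_config_id(ctx, mor, "ssh")  # -> "1"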
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run the subcommand/plugin while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start the proxy in a background thread, so don't display the
# connection info here; plugins are expected to handle their own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
start
|
Start the local proxy in the foreground and block until it stops
Use start_background() instead to keep the proxy running in the
background so that other commands can be chained after it.
|
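A minimal usage sketch for start()/start_background() (the import path and CLI options are illustrative assumptions; start() runs pre_start_checks() and start_proxy() in the foreground, while start_background() returns once the local port is open):

import click
from c8ylp.cli.core import ProxyContext  # assumed module path

@click.command()
@click.option("--host")
@click.option("--device")
@click.pass_context
def connect(ctx: click.Context, host: str, device: str):
    opts = ProxyContext(ctx, {"host": host, "device": device})
    # Blocks until the proxy is stopped (e.g. ctrl-c):
    opts.start()
    # Alternative: run it in the background and keep using the context:
    # opts.start_background()
    # click.echo(f"local proxy listening on port {opts.used_port}")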
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
# MASKED: start function (lines 169-178)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments and expose the variables to WSL as well
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# reuse and reconfigure the existing console handler
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run the subcommand/plugin while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start the proxy in a background thread, so don't display the
# connection info here; plugins are expected to handle their own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
| 169 | 178 |
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments and expose the variables to WSL as well
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# reuse and reconfigure the existing console handler
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run the subcommand/plugin while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start the proxy in a background thread, so don't display the
# connection info here; plugins are expected to handle their own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
show_error
|
Show an error to the user and log it
Args:
msg (str): User message to print on the console
|
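A minimal sketch of the masked method, mirroring the show_error definition visible in the full copies of this file above (print in red unless verbose, plus a log record):

def show_error(self, msg: str, *args, **kwargs):
    """Show an error to the user and log it"""
    if not self.verbose:
        click.secho(msg, fg="red")
    logging.warning(msg, *args, **kwargs)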
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
# MASKED: show_error function (lines 190-199)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments and expose the variables to WSL as well
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# reuse and reconfigure the existing console handler
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can run the subcommand/plugin while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including the
# env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
        opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
        stop_signal (threading.Event, optional): Event used to request a shutdown
        ready_signal (threading.Event, optional): Event which is set once the
            server is accepting connections
    """
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
    tcp_server = None
    background = None
    # Set a default exit code so the finally block always has a value,
    # even if server setup fails before the assignment inside the try block
    exit_code = ExitCodes.UNKNOWN
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
        # store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
        # Plugins run the proxy in a background thread, so skip these hints there
        # and let the plugin produce its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
| 190 | 199 |
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
            src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
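    # Usage sketch (hypothetical values): start the proxy in the background from a
    # plugin and read back the port that was actually bound:
    #
    #   proxy = ProxyContext(ctx, {"device": "my-device", "host": "https://example.cumulocity.com"})
    #   proxy.start_background()
    #   click.echo(f"proxy ready on localhost:{proxy.used_port}")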
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
        logging.info(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
        be accessed by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
        # Support WSL environments by also exposing the variables to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
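    # Example: running with C8YLP_LOG_DIR=/tmp/c8ylp places the rotating log file at
    # /tmp/c8ylp/localproxy.log instead of the default ~/.c8ylp/localproxy.log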
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found among configurations with protocol "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
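# For reference, a hypothetical managed object fragment accepted by get_config_id
# (only the keys read above are shown; all values are made up):
#
#   "c8y_RemoteAccessList": [
#       {"id": "12345", "name": "ssh", "protocol": "PASSTHROUGH", "port": 22},
#   ]
#
# get_config_id(ctx, mor, "ssh") would then log the name/port pair and return "12345".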
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
        ready_signal (threading.Event, optional): Event which is set once the proxy
            is ready and the environment variables have been injected
    """
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the wrapped subcommand/plugin
    # can run while the proxy is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # injected environment variables, has been set up
if ready_signal:
ready_signal.set()
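# Note: run_proxy_in_background() is what ProxyContext.start_background() delegates to.
# The @ctx.call_on_close hook above guarantees that the proxy thread is stopped and the
# duration message is printed once the wrapped subcommand/plugin has finished.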
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
        opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
        stop_signal (threading.Event, optional): Event used to request a shutdown
        ready_signal (threading.Event, optional): Event which is set once the
            server is accepting connections
    """
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
    tcp_server = None
    background = None
    # Set a default exit code so the finally block always has a value,
    # even if server setup fails before the assignment inside the try block
    exit_code = ExitCodes.UNKNOWN
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
        # store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
        # Plugins run the proxy in a background thread, so skip these hints there
        # and let the plugin produce its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|