from collections.abc import Sequence

import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC

from ..builder import PIPELINES


def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
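
    Example (illustrative sketch; the input values below are arbitrary):
        >>> to_tensor(np.ones((2, 3))).shape
        torch.Size([2, 3])
        >>> to_tensor([1, 2, 3])
        tensor([1, 2, 3])
        >>> to_tensor(1)
        tensor([1])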
    """

    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError(f'type {type(data)} cannot be converted to tensor.')


@PIPELINES.register_module()
class ToTensor(object):
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
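
    Example (illustrative sketch; assumes ``results`` already holds numpy
    data under the given keys):
        >>> transform = ToTensor(keys=['gt_labels'])
        >>> results = transform(dict(gt_labels=np.array([1, 2, 3])))
        >>> isinstance(results['gt_labels'], torch.Tensor)
        True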
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert data in results to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted
                to :obj:`torch.Tensor`.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@PIPELINES.register_module()
class ImageToTensor(object):
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will convert
    it to (C, H, W). If only 2 dimensions (H, W) are given, the output would
    be (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
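
    Example (illustrative sketch with a dummy image):
        >>> transform = ImageToTensor(keys=['img'])
        >>> results = dict(img=np.zeros((300, 400, 3), dtype=np.uint8))
        >>> transform(results)['img'].shape
        torch.Size([3, 300, 400])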
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert image in results to :obj:`torch.Tensor`
        and transpose the channel order.

        Args:
            results (dict): Result dict contains the image data to convert.

        Returns:
            dict: The result dict contains the image converted
                to :obj:`torch.Tensor` and transposed to (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@PIPELINES.register_module()
class Transpose(object):
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
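
    Example (illustrative sketch; ``order=(2, 0, 1)`` moves the channel axis
    of an (H, W, C) array to the front):
        >>> transform = Transpose(keys=['img'], order=(2, 0, 1))
        >>> results = dict(img=np.zeros((300, 400, 3)))
        >>> transform(results)['img'].shape
        (3, 300, 400)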
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Call function to transpose the channel order of data in results.

        Args:
            results (dict): Result dict contains the data to transpose.

        Returns:
            dict: The result dict contains the data transposed to \
                ``self.order``.
        """
        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + \
            f'(keys={self.keys}, order={self.order})'


@PIPELINES.register_module()
class ToDataContainer(object):
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
            dict(key='gt_labels'))``.
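
    Example (illustrative sketch; wraps a made-up ``gt_labels`` tensor):
        >>> transform = ToDataContainer(fields=(dict(key='gt_labels'),))
        >>> results = dict(gt_labels=to_tensor(np.array([1, 2])))
        >>> isinstance(transform(results)['gt_labels'], DC)
        True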
    """

    def __init__(self,
                 fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
                         dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        """Call function to convert data in results to
        :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted to \
                :obj:`mmcv.DataContainer`.
        """

        for field in self.fields:
            field = field.copy()
            key = field.pop('key')
            results[key] = DC(results[key], **field)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(fields={self.fields})'


@PIPELINES.register_module()
class DefaultFormatBundle(object):
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
    These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
                       (3)to DataContainer (stack=True)
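
    Example (illustrative sketch with dummy detection data):
        >>> bundle = DefaultFormatBundle()
        >>> results = dict(img=np.zeros((300, 400, 3), dtype=np.uint8))
        >>> results['gt_bboxes'] = np.array([[0, 0, 10, 10]])
        >>> results['gt_labels'] = np.array([1])
        >>> results = bundle(results)
        >>> results['img'].data.shape
        torch.Size([3, 300, 400])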
    """

    def __call__(self, results):
        """Call function to transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with \
                default bundle.
        """

        if 'img' in results:
            img = results['img']

            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
            if key not in results:
                continue
            results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including ``pad_shape``, ``scale_factor``
        and ``img_norm_cfg`` so that they are present even if ``Resize``,
        ``Normalize`` and ``Pad`` are not applied in the pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict with the default meta keys.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results

    def __repr__(self):
        return self.__class__.__name__


@PIPELINES.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    ``keys`` is set to some subset of "img", "proposals", "gt_bboxes",
    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depend on "meta_keys". By default this includes:

        - "img_shape": shape of the image input to the network as a tuple \
            (h, w, c). Note that images may be zero padded on the \
            bottom/right if the batch tensor is larger than this shape.

        - "scale_factor": a float indicating the preprocessing scale

        - "flip": a boolean indicating if image flip transform was used

        - "filename": path to the image file

        - "ori_shape": original shape of the image as a tuple (h, w, c)

        - "pad_shape": image shape after padding

        - "img_norm_cfg": a dict of normalization information:

            - mean - per channel mean subtraction
            - std - per channel std divisor
            - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
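
    Example (illustrative sketch; ``meta_keys`` is trimmed to a field that
    exists in the dummy ``results``):
        >>> collect = Collect(keys=['img'], meta_keys=('img_shape',))
        >>> results = dict(img_shape=(300, 400, 3))
        >>> results['img'] = to_tensor(np.zeros((3, 300, 400)))
        >>> data = collect(results)
        >>> sorted(data.keys())
        ['img', 'img_metas']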
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Call function to collect keys in results. The keys in ``meta_keys``
        will be converted to :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to collect.

        Returns:
            dict: The result dict contains the following keys

                - keys in ``self.keys``
                - ``img_metas``
        """

        data = {}
        img_meta = {}
        for key in self.meta_keys:
            img_meta[key] = results[key]
        data['img_metas'] = DC(img_meta, cpu_only=True)
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return self.__class__.__name__ + \
            f'(keys={self.keys}, meta_keys={self.meta_keys})'


@PIPELINES.register_module()
class WrapFieldsToLists(object):
    """Wrap fields of the data dictionary into lists for evaluation.

    This class can be used as a last step of a test or validation
    pipeline for single image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>    dict(type='LoadImageFromFile'),
        >>>    dict(type='Normalize',
                    mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True),
        >>>    dict(type='Pad', size_divisor=32),
        >>>    dict(type='ImageToTensor', keys=['img']),
        >>>    dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        """Call function to wrap fields into lists.

        Args:
            results (dict): Result dict contains the data to wrap.

        Returns:
            dict: The result dict where every value is wrapped into a list.
        """

        for key, val in results.items():
            results[key] = [val]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'