Module rnnbuilder.base
Base classes not intended for direct use.
Expand source code
"""Base classes not intended for direct use."""
import torch
from ._modules import _ModuleBase
import rnnbuilder as _rb
class ModuleFactory:
"""Factory base class. Factories are used to build networks recursively. Call `make_model` to get a PyTorch model.
"""
def make_model(self, in_shape: tuple, input_mode='sequence', full_state=False):
"""Returns a usable PyTorch model corresponding to the factory. Every call returns a model independent from
previous calls with its own parameters. This applies to sub-modules as well. Parameter sharing is not supported.
Args:
in_shape (tuple): shape of the input without time or batch dimensions.
input_mode: One of 'single, batch, sequence'. Whether to expect time and batch dimensions in the input. (output
matches) Order is always time, batch, data0, data1, ... (Can be changed using `OuterModule.configure` later)
full_state: whether the network returns the network state for all elements of the input sequence. If enabled,
some modules might compute slower. Default: False (Can be changed using `OuterModule.configure` later)
"""
return OuterModule(self._assemble_module(tuple(in_shape), False), input_mode, full_state, tuple(in_shape))
def _assemble_module(self, in_shape, unrolled):
return _ModuleBase(in_shape)
def _shape_change(self, in_shape):
return in_shape
class LayerInput:
"""Base class for inputs to `Layer` in `Network`"""
_mode = 'stack'
class LayerBase(LayerInput):
"""Base class for layers of a `Network`"""
def __init__(self):
self._registered = False
def sum(self, *layers: 'LayerBase'):
"""adds up self and given layers to form an input for a `Layer`. Data dimensions need to be identical.
"""
if not self._registered:
raise Exception('This layer is not part of a network yet. Chaining is not allowed.')
return _rb.Sum(self, *layers)
def stack(self, *layers: 'LayerBase') -> '_rb.Stack':
"""stacks self and given layers along the first data dimension using `torch.cat` to form an input for a `Layer`
"""
if not self._registered:
raise Exception('This layer is not part of a network yet. Chaining is not allowed.')
return _rb.Stack(self, *layers)
def append(self, *layers: 'LayerBase') -> '_rb.List':
"""returns a `rnnbuilder.List`"""
if not self._registered:
raise Exception('This layer is not part of a network yet. Chaining is not allowed.')
return _rb.List(self, *layers)
def apply(self, *module_facs, placeholder=None):
"""A `rnnbuilder.Layer` is formed by applying the given `ModuleFactory`s to the output of the calling layer."""
if not self._registered:
raise Exception('This layer is not part of a network yet. Chaining is not allowed.')
return _rb.Layer(self, module_facs, placeholder=placeholder)
class InputBase(LayerInput):
"""Base class for input aggregations
"""
def __init__(self, *layers: LayerBase):
self.layers = layers
def apply(self, *module_facs, placeholder=None):
"""A `rnnbuilder.Layer` is formed by applying the given `ModuleFactory`s to the calling input aggregate."""
return _rb.Layer(self, module_facs, placeholder=placeholder)
class OuterModule(torch.nn.Module):
"""Wrapper for all models generated by rnnbuilder. Offers some configuration as well as internal features and input
checks.
"""
def __init__(self, inner, input_mode, full_state, in_shape):
super().__init__()
self.in_shape = in_shape
self.inner = inner
self.register_buffer('_device_zero', torch.zeros(1))
self.to('cpu')
self.input_mode = input_mode
self.configure(full_state=full_state)
"""Run-time configuration options. Only specified options will be overridden.
Args:
input_mode: One of 'single, batch, sequence'. Whether to expect time and batch dimensions in the input. (output
matches) Order is always time, batch, data0, data1, ...
full_state: whether the network returns the network state for all elements of the input sequence. If enabled,
some modules might compute slower. Default: False
"""
def configure(self, input_mode=None, full_state=None):
if full_state is not None:
for module in self.modules():
module._full_state = full_state
if input_mode is not None:
if input_mode not in ['single, batch, sequence']:
raise Exception('Wrong input to OuterModule.configure. ' + str(
input_mode) + ' is not a valid value for input_mode.')
self.input_mode = input_mode
def _apply(self, fn):
ret = super()._apply(fn)
for module in self.modules():
module.device = self._device_zero.device
return ret
def forward(self, x, h=None):
""""""
if self.input_mode == 'single':
x = x.unsqueeze(0)
if self.input_mode != 'sequence':
x = x.unsqueeze(0)
if tuple(x.shape[2:]) != self.in_shape:
raise Exception('Input does not have the shape specified during the make_model call.'
' Maybe you forgot dimensions or should change input_mode using configure')
h = h or self.inner.get_initial_state(x.shape[1])
out, h = self.inner(x, h)
if self.input_mode == 'single':
out = out.squeeze(0)
if self.input_mode != 'sequence':
out = out.squeeze(0)
return out, h
def forward_log(self, x, h):
"""Not implemented yet."""
pass #TODO: implement logging
# only single step _inputs?
def get_initial_state(self, batch_size):
return self.inner.get_initial_state(batch_size)
Classes
class InputBase (*layers: LayerBase)
Base class for input aggregations
Ancestors
- LayerInput
Subclasses
Methods
def apply(self, *module_facs, placeholder=None)
A `Layer` is formed by applying the given `ModuleFactory`s to the calling input aggregate.
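For illustration, a minimal hypothetical sketch (the names `agg` and `fac` are placeholders, not part of the library): an input aggregate can be turned into a new layer by applying one or more factories to it.

# Hypothetical sketch: `agg` is an input aggregate (e.g. produced by an aggregation
# helper such as stack or sum) and `fac` is a ModuleFactory instance.
new_layer = agg.apply(fac)  # wraps the factory into a Layer fed by the aggregate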
class LayerBase
Base class for layers of a `Network`
Ancestors
- LayerInput
Subclasses
Methods
def append(self, *layers: LayerBase) ‑> List
Returns a `rnnbuilder.List`.
def apply(self, *module_facs, placeholder=None)
A `Layer` is formed by applying the given `ModuleFactory`s to the output of the calling layer.
def stack(self, *layers: LayerBase) ‑> Stack
Stacks self and the given layers along the first data dimension using `torch.cat` to form an input for a `Layer`.
def sum(self, *layers: LayerBase)
Adds up self and the given layers to form an input for a `Layer`. Data dimensions need to be identical.
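In plain tensor terms (a sketch with made-up shapes; time and batch dimensions come first), the two aggregation modes behave as follows:

import torch

# Two layer outputs of shape (time, batch, features).
a = torch.randn(10, 4, 8)
b = torch.randn(10, 4, 8)

stacked = torch.cat([a, b], dim=2)  # stack: concatenation along the first data dimension -> (10, 4, 16)
summed = a + b                      # sum: element-wise addition, data dimensions must be identical -> (10, 4, 8)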
class LayerInput
Base class for inputs to `Layer` in `Network`
Subclasses
- InputBase
- LayerBase
class ModuleFactory
Factory base class. Factories are used to build networks recursively. Call `make_model` to get a PyTorch model.
Subclasses
- Network
- Sequential
- rnnbuilder.custom._factories._NonRecurrentFactory
- rnnbuilder.custom._factories._RecurrentFactory
- BatchNorm2d
- Linear
- ReLU
- Sigmoid
- Tanh
- Discontinuous
Methods
def make_model(self, in_shape: tuple, input_mode='sequence', full_state=False)
Returns a usable PyTorch model corresponding to the factory. Every call returns a model independent from previous calls with its own parameters. This applies to sub-modules as well. Parameter sharing is not supported.
Args
in_shape : tuple
- shape of the input without time or batch dimensions.
input_mode
- One of 'single', 'batch', 'sequence'. Whether to expect time and batch dimensions in the input (the output matches). Order is always time, batch, data0, data1, … (Can be changed later using `OuterModule.configure`.)
full_state
- whether the network returns the network state for all elements of the input sequence. If enabled, some modules might compute slower. Default: False (Can be changed later using `OuterModule.configure`.)
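A minimal usage sketch, assuming `fac` is a `ModuleFactory` built elsewhere with rnnbuilder (the shapes are illustrative only):

import torch

model = fac.make_model((28, 28), input_mode='sequence', full_state=False)

x = torch.randn(10, 4, 28, 28)  # time, batch, data0, data1
out, state = model(x)
# A second fac.make_model(...) call would return a fresh model with its own, unshared parameters.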
class OuterModule (inner, input_mode, full_state, in_shape)
Wrapper for all models generated by rnnbuilder. Offers some configuration as well as internal features and input checks.
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Ancestors
- torch.nn.modules.module.Module
Class variables
var dump_patches : bool
var training : bool
Methods
def configure(self, input_mode=None, full_state=None)
Run-time configuration options. Only specified options will be overridden.
Args
input_mode
- One of 'single', 'batch', 'sequence'. Whether to expect time and batch dimensions in the input (the output matches). Order is always time, batch, data0, data1, ...
full_state
- whether the network returns the network state for all elements of the input sequence. If enabled, some modules might compute slower. Default: False
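For example (a sketch, assuming `model` was created by `make_model`), switching to single-sample inputs and full state output:

import torch

model.configure(input_mode='single', full_state=True)
out, state = model(torch.randn(28, 28))  # 'single' mode: no time or batch dimension in input or output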
def forward(self, x, h=None)
Runs the wrapped model: adds or removes the time and batch dimensions according to `input_mode`, checks the input against `in_shape` and returns the output together with the new network state. If no state `h` is passed, a fresh initial state is created.
def forward_log(self, x, h)
Not implemented yet.
def get_initial_state(self, batch_size)
Returns an initial network state for the given batch size (delegated to the wrapped inner module).
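A sketch of carrying the recurrent state across calls (assuming `model` comes from `make_model` in the default 'sequence' mode and `chunk1`/`chunk2` are input tensors shaped time, batch, data...):

h = model.get_initial_state(batch_size=4)
out1, h = model(chunk1, h)  # process the first chunk
out2, h = model(chunk2, h)  # continue from the state left by the first chunk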