Neural Networks

Small feed-forward networks for inference inside Photonscore analysis pipelines. The public Python package ships inference only — training utilities are kept in the internal release.

import photonscore.ann as ann

# Load a trained network (weights typically come from a .photons
# attribute, a side file, or a Photonscore-provided model).
net = ann.FeedForward(layers=[16, 32, 8], weights=trained_weights)

# Predict
y_pred = net(X_test)

Need to train your own network on Photonscore data? Reach out to email@photonscore.de — the training toolchain lives in the internal release.

FeedForward

ann.ff.FeedForward

Dense feed-forward network with tanh activations.

Attributes:

    W: List of weight matrices, one per layer.
    b: List of bias vectors, one per layer.
    weights: Flat 1-D NumPy view containing all biases and weights (settable to load a trained network).
    layers: Sizes of the input + hidden layers (excluding the implicit single-element output).
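
The flat weights vector packs, for each layer, the bias vector first and then the flattened weight matrix, with the implicit single-output layer appended after the user-supplied sizes. A minimal sketch of the expected length under that packing order (expected_weights_len is a hypothetical helper for illustration, not part of the package):

def expected_weights_len(layers):
  # Hypothetical helper: mirrors the packing order used by FeedForward.weights.
  # Each layer contributes its bias vector (n_out values) followed by its
  # flattened weight matrix (n_in * n_out values); a single-element output
  # layer is appended to the given sizes.
  sizes = list(layers) + [1]
  return sum(n_out + n_in * n_out
             for n_in, n_out in zip(sizes[:-1], sizes[1:]))

expected_weights_len([16, 32, 8])  # 817 = (32 + 512) + (8 + 256) + (1 + 8)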

Source code in ann/ff.py
class FeedForward:
  """Dense feed-forward network with `tanh` activations.

  Attributes:
      W: List of weight matrices, one per layer.
      b: List of bias vectors, one per layer.
      weights: Flat 1-D NumPy view containing all biases and weights
          (settable to load a trained network).
      layers: Sizes of the input + hidden layers (excluding the
          implicit single-element output).
  """

  def __init__(self, layers: list[int], weights: np.ndarray | None = None):
    """Build a network with the given layer widths.

    Args:
        layers: Iterable of integer layer sizes (input + hidden).
            A single-element output layer is appended automatically.
        weights: Optional pre-trained weight vector. If supplied, it
            is unpacked into :attr:`W` and :attr:`b`.
    """
    # Create the structure
    self.W = []
    self.b = []
    ll = [l for l in layers]
    ll.append(1)
    for i in range(len(ll) - 1):
      a = ll[i]
      b = ll[i + 1]
      self.W.append(np.zeros(shape = [a, b]))
      self.b.append(np.zeros(shape = b))
    if weights is not None:
      self.weights = weights

  @property
  def layers(self):
    return [w.shape[0] for w in self.W[:]]

  @property
  def weights(self):
    res = np.zeros(shape = weights_size(self.layers))
    offset = 0
    for i in range(len(self.W)):
      n = self.b[i].size
      res[offset:offset + n] = self.b[i].flatten()
      offset += n
      n = self.W[i].size
      res[offset:offset + n] = self.W[i].flatten()
      offset += n
    return res

  @weights.setter
  def weights(self, value):
    v = np.array(value)[:]
    offset = 0
    for i in range(len(self.W)):
      n = self.b[i].size
      self.b[i][:] = v[offset:offset + n].reshape(self.b[i].shape)
      offset += n
      n = self.W[i].size
      self.W[i][:] = v[offset:offset + n].reshape(self.W[i].shape)
      offset += n

  def evaluate(self, x: np.ndarray) -> np.ndarray:
    """Run a forward pass on a batch of input vectors `x`."""
    for i in range(len(self.W)):
      if i > 0:
        x = np.tanh(x)
      x = np.dot(self.W[i].T, x.T).T
      x += np.repeat(self.b[i], x.shape[0]).reshape(x.T.shape).T
    return x.flatten()

  def __call__(self, x: np.ndarray) -> np.ndarray:
    return self.evaluate(x)

__init__(layers, weights=None)

Build a network with the given layer widths.

Parameters:

    layers (list[int], required): Iterable of integer layer sizes (input + hidden). A single-element output layer is appended automatically.
    weights (ndarray | None, default None): Optional pre-trained weight vector. If supplied, it is unpacked into W and b.
Source code in ann/ff.py
def __init__(self, layers: list[int], weights: np.ndarray | None = None):
  """Build a network with the given layer widths.

  Args:
      layers: Iterable of integer layer sizes (input + hidden).
          A single-element output layer is appended automatically.
      weights: Optional pre-trained weight vector. If supplied, it
          is unpacked into :attr:`W` and :attr:`b`.
  """
  # Create the structure
  self.W = []
  self.b = []
  ll = [l for l in layers]
  ll.append(1)
  for i in range(len(ll) - 1):
    a = ll[i]
    b = ll[i + 1]
    self.W.append(np.zeros(shape = [a, b]))
    self.b.append(np.zeros(shape = b))
  if weights is not None:
    self.weights = weights
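
For example, with the quickstart import (import photonscore.ann as ann), constructing a network without weights allocates zero-filled matrices whose shapes follow from the layer sizes plus the implicit single-element output (a sketch; the shapes follow directly from the source above):

import photonscore.ann as ann

net = ann.FeedForward(layers=[16, 32, 8])
[w.shape for w in net.W]  # [(16, 32), (32, 8), (8, 1)]
[b.shape for b in net.b]  # [(32,), (8,), (1,)]

# A trained flat vector of matching length can be loaded afterwards:
# net.weights = trained_weights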

evaluate(x)

Run a forward pass on a batch of input vectors x.

Source code in ann/ff.py
def evaluate(self, x: np.ndarray) -> np.ndarray:
  """Run a forward pass on a batch of input vectors `x`."""
  for i in range(len(self.W)):
    if i > 0:
      x = np.tanh(x)
    x = np.dot(self.W[i].T, x.T).T
    x += np.repeat(self.b[i], x.shape[0]).reshape(x.T.shape).T
  return x.flatten()
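
In practice x is a batch of shape (n_samples, layers[0]) and the result is a flat array with one prediction per sample; calling the network directly is equivalent (a sketch, reusing the net built above):

import numpy as np

X = np.random.rand(100, 16)   # 100 samples for an input layer of width 16
y = net.evaluate(X)           # same as net(X)
y.shape                       # (100,)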

evaluate

ann.evaluate.evaluate(layers, weights, inputs)

Run a forward pass through a trained network.

Parameters:

    layers (list[int], required): Iterable of integer layer sizes used during training.
    weights (ndarray, required): Flat weight vector returned by photonscore.ann.train.
    inputs (ndarray, required): Inputs to evaluate, shape (n_samples, layers[0]).

Returns:

    ndarray: NumPy array of predictions, one per sample.

Source code in ann/evaluate.py
def evaluate(layers: list[int], weights: np.ndarray, inputs: np.ndarray) -> np.ndarray:
  """Run a forward pass through a trained network.

  Args:
      layers: Iterable of integer layer sizes used during training.
      weights: Flat weight vector returned by
          :func:`photonscore.ann.train`.
      inputs: Inputs to evaluate, shape ``(n_samples, layers[0])``.

  Returns:
      NumPy array of predictions, one per sample.
  """
  return _ann_evaluate(layers, np.array(weights), np.array(inputs))
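
A sketch of calling the functional form directly, assuming the module path shown above (photonscore.ann.evaluate) and illustrative zero weights of the length that the FeedForward packing implies for layers=[16, 32, 8]:

import numpy as np
from photonscore.ann.evaluate import evaluate

layers = [16, 32, 8]
weights = np.zeros(817)           # flat vector sized for layers=[16, 32, 8]
X = np.random.rand(100, 16)       # shape (n_samples, layers[0])

y = evaluate(layers, weights, X)
y.shape                           # (100,)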