# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.functions.multiblock.properties` module contains
properties that describe the functionality of the multiblock functions.

Try to keep the inheritance tree loop-free unless absolutely impossible.

Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.

Created on Mon Feb 3 09:55:51 2014

@author: Tommy Löfstedt
@email: lofstedt.tommy@gmail.com
@license: BSD 3-clause.
"""
import abc

import numpy as np

from .. import properties
import parsimony.utils.consts as consts

__all__ = ["MultiblockFunction", "MultiblockGradient",
           "MultiblockLipschitzContinuousGradient",
           "MultiblockProximalOperator", "MultiblockProjectionOperator",
           "MultiblockContinuation", "MultiblockStepSize"]
30 """ This is a function that is the combination (i.e. sum) of other
31 multiblock, composite or atomic functions. The difference from
32 CompositeFunction is that this function assumes that relevant functions
33 accept an index, i, that is the block we are working with.
34 """
35 __metaclass__ = abc.ABCMeta
36
37 constraints = dict()
38
40 """Add a constraint to this function.
41 """
42 if index in self.constraints:
43 self.constraints[index].append(function)
44 else:
45 self.constraints[index] = [function]
46
48 """Returns the constraint functions for the function with the given
49 index. Returns an empty list if no constraint functions exist for the
50 given index.
51 """
52 if index in self.constraints:
53 return self.constraints[index]
54 else:
55 return []
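
    # A minimal, hypothetical usage sketch (not part of the original module):
    # constraints are registered per block index and queried the same way. The
    # subclass and constraint objects below are illustrative placeholders only.
    #
    #     func = SomeConcreteMultiblockFunction()      # assumed subclass
    #     func.add_constraint(some_l1_constraint, index=0)
    #     func.get_constraints(0)    # -> [some_l1_constraint]
    #     func.get_constraints(1)    # -> [] (nothing registered for block 1)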


class MultiblockGradient(object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def grad(self, x, index):
        """Gradient of the function.

        Parameters
        ----------
        x : List of numpy arrays. The weight vectors, x[index] is the point
                at which to evaluate the gradient.

        index : Non-negative integer. Which variable the gradient is for.
        """
        raise NotImplementedError('Abstract method "grad" must be '
                                  'specialised!')

    def approx_grad(self, x, index, eps=1e-4):
        """Numerical approximation of the gradient.

        Parameters
        ----------
        x : List of numpy arrays. The weight vectors, x[index] is the point
                at which to evaluate the gradient.

        index : Non-negative integer. Which variable the gradient is for.

        eps : Positive float. The precision of the numerical solution.
                Smaller is better, but too small may result in floating point
                precision errors.
        """
        x_ = x[index]
        p = x_.shape[0]
        grad = np.zeros(x_.shape)

        start = 0
        for i in xrange(start, p):
            x_[i, 0] -= eps
            loss1 = self.f(x)
            x_[i, 0] += 2.0 * eps
            loss2 = self.f(x)
            x_[i, 0] -= eps
            grad[i, 0] = (loss2 - loss1) / (2.0 * eps)

        return grad
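
    # A hedged sanity-check sketch (not from the original source): a concrete
    # subclass implementing both f() and grad() can be compared against the
    # numerical approximation above. "func" and the block shapes are assumed.
    #
    #     x = [np.random.rand(10, 1), np.random.rand(5, 1)]
    #     diff = np.linalg.norm(func.grad(x, 0) - func.approx_grad(x, 0))
    #     assert diff < 5e-5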


class MultiblockLipschitzContinuousGradient(object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def L(self, w, index):
        """Lipschitz constant of the gradient with the given index.

        Parameters
        ----------
        w : List of numpy arrays. The weight vectors, w[index] is the point
                at which to evaluate the Lipschitz constant.

        index : Non-negative integer. The variable for which the Lipschitz
                constant should be evaluated.
        """
        raise NotImplementedError('Abstract method "L" must be '
                                  'specialised!')
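
    # A hedged usage note (not from the original source): the constant returned
    # by L() is typically used to form a gradient-descent step for block
    # "index", assuming the function also exposes MultiblockGradient.grad():
    #
    #     t = 1.0 / func.L(w, index)
    #     w[index] = w[index] - t * func.grad(w, index)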


class MultiblockProximalOperator(object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def prox(self, w, index, factor=1.0):
        """A proximal operator of the non-differentiable part of the function
        with the given index.

        Parameters
        ----------
        w : List of numpy arrays. The weight vectors.

        index : Non-negative integer. Which variable the step is for.

        factor : Positive float. A factor by which the Lagrange multiplier is
                scaled. This is usually the step size.
        """
        raise NotImplementedError('Abstract method "prox" must be '
                                  'specialised!')
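
    # A hedged illustration (not from the original source): for an L1 penalty
    # with Lagrange multiplier l on block "index", a prox() specialisation
    # would amount to soft-thresholding, with "factor" scaling the multiplier:
    #
    #     def prox(self, w, index, factor=1.0):
    #         l = factor * self.l
    #         w_i = w[index]
    #         return np.sign(w_i) * np.maximum(0.0, np.abs(w_i) - l)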


class MultiblockProjectionOperator(object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def proj(self, w, index):
        """The projection operator of a constraint that corresponds to the
        function with the given index.

        Parameters
        ----------
        w : List of numpy arrays. The weight vectors.

        index : Non-negative integer. Which variable the step is for.
        """
        raise NotImplementedError('Abstract method "proj" must be '
                                  'specialised!')
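
    # A hedged illustration (not from the original source): for a constraint of
    # the form ||w[index]||_2 <= c, a proj() specialisation would rescale the
    # block back onto the ball whenever it falls outside:
    #
    #     def proj(self, w, index):
    #         w_i = w[index]
    #         norm = np.linalg.norm(w_i)
    #         return w_i if norm <= self.c else (self.c / norm) * w_i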


class MultiblockContinuation(object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def mu_opt(self, eps, index):
        """The optimal value of mu given epsilon.

        Parameters
        ----------
        eps : Positive float. The desired precision.

        index : Non-negative integer. Which block this is for.

        Returns
        -------
        mu : Positive float. The optimal regularisation parameter.
        """
        raise NotImplementedError('Abstract method "mu_opt" must be '
                                  'specialised!')

    @abc.abstractmethod
    def eps_opt(self, mu, index):
        """The optimal value of epsilon given mu.

        Parameters
        ----------
        mu : Positive float. The regularisation constant of the smoothing.

        index : Non-negative integer. Which block this is for.

        Returns
        -------
        eps : Positive float. The optimal precision.
        """
        raise NotImplementedError('Abstract method "eps_opt" must be '
                                  'specialised!')

    @abc.abstractmethod
    def eps_max(self, mu, index):
        """The maximum value of epsilon.

        Parameters
        ----------
        mu : Positive float. The regularisation constant of the smoothing.

        index : Non-negative integer. Which block this is for.

        Returns
        -------
        eps : Positive float. The upper limit (maximum) of the precision.
        """
        raise NotImplementedError('Abstract method "eps_max" must be '
                                  'specialised!')
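
    # A hedged usage sketch (not from the original source): in a continuation
    # scheme the three methods above relate a target precision to a smoothing
    # level for a given block, roughly as follows.
    #
    #     mu = func.mu_opt(eps, index)         # smoothing level for a target precision
    #     eps = func.eps_opt(mu, index)        # precision attainable at that smoothing level
    #     eps_limit = func.eps_max(mu, index)  # largest meaningful precision for that mu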


class MultiblockStepSize(object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def step(self, w, index):
        """The step size to use in descent methods.

        Parameters
        ----------
        w : List of numpy arrays. The weight vectors, w[index] is the point
                at which to determine the step size.

        index : Non-negative integer. Which variable the step is for.
        """
        raise NotImplementedError('Abstract method "step" must be '
                                  'specialised!')
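
    # A hedged implementation sketch (not from the original source): when the
    # gradient of block "index" is Lipschitz continuous, a common specialisation
    # of step() is simply the inverse of the Lipschitz constant:
    #
    #     def step(self, w, index):
    #         return 1.0 / self.L(w, index)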