Package parsimony :: Package functions :: Package nesterov :: Module properties
[hide private]
[frames] | [no frames]

Source Code for Module parsimony.functions.nesterov.properties

  1  # -*- coding: utf-8 -*- 
  2  """ 
  3  The :mod:`parsimony.functions.nesterov.properties` module contains the 
  4  necessary properties for Nesterov functions. 
  5   
  6  Created on Mon Feb  3 10:51:33 2014 
  7   
  8  Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved. 
  9   
 10  @author:  Tommy Löfstedt 
 11  @email:   lofstedt.tommy@gmail.com 
 12  @license: BSD 3-clause. 
 13  """ 
 14  #import abc 
 15  # 
 16  #import numpy as np 
 17  # 
 18  #import parsimony.utils.consts as consts 
 19  # 
 20  #__all__ = ["NesterovFunction"] 
 21  # 
 22  # 
 23  #class NesterovFunction(object): 
 24  #    """Abstract superclass of Nesterov functions. 
 25  # 
 26  #    Attributes: 
 27  #    ---------- 
 28  #    l : Non-negative float. The Lagrange multiplier, or regularisation 
 29  #            constant, of the function. 
 30  # 
 31  #    mu : Non-negative float. The Nesterov function regularisation constant for 
 32  #            the smoothing. 
 33  # 
 34  #    penalty_start : Non-negative integer. The number of columns, variables 
 35  #            etc., to exempt from penalisation. Equivalently, the first index 
 36  #            to be penalised. Default is 0, all columns are included. 
 37  #    """ 
 38  #    __metaclass__ = abc.ABCMeta 
 39  # 
 40  #    def __init__(self, l, A=None, mu=consts.TOLERANCE, penalty_start=0): 
 41  #        """ 
 42  #        Parameters 
 43  #        ---------- 
 44  #        l : Non-negative float. The Lagrange multiplier, or regularisation 
 45  #                constant, of the function. 
 46  # 
 47  #        A : A (usually sparse) array. The linear operator for the Nesterov 
 48  #                formulation. May not be None! 
 49  # 
 50  #        mu : Non-negative float. The regularisation constant for the smoothing. 
 51  # 
 52  #        penalty_start : Non-negative integer. The number of columns, variables 
 53  #                etc., to exempt from penalisation. Equivalently, the first 
 54  #                index to be penalised. Default is 0, all columns are included. 
 55  #        """ 
 56  #        self.l = float(l) 
 57  #        if A is None: 
 58  #            raise ValueError("The linear operator A must not be None.") 
 59  #        self._A = A 
 60  #        self.mu = float(mu) 
 61  #        self.penalty_start = int(penalty_start) 
 62  # 
 63  #    def fmu(self, beta, mu=None): 
 64  #        """Returns the smoothed function value. 
 65  # 
 66  #        Parameters 
 67  #        ---------- 
 68  #        beta : Numpy array. A weight vector. 
 69  # 
 70  #        mu : Non-negative float. The regularisation constant for the smoothing. 
 71  #        """ 
 72  #        if mu is None: 
 73  #            mu = self.get_mu() 
 74  # 
 75  #        alpha = self.alpha(beta) 
 76  #        alpha_sqsum = 0.0 
 77  #        for a in alpha: 
 78  #            alpha_sqsum += np.sum(a ** 2.0) 
 79  # 
 80  #        Aa = self.Aa(alpha) 
 81  # 
 82  #        if self.penalty_start > 0: 
 83  #            beta_ = beta[self.penalty_start:, :] 
 84  #        else: 
 85  #            beta_ = beta 
 86  # 
 87  #        return self.l * (np.dot(beta_.T, Aa)[0, 0] - (mu / 2.0) * alpha_sqsum) 
 88  # 
 89  #    @abc.abstractmethod 
 90  #    def phi(self, alpha, beta): 
 91  #        """ Function value with known alpha. 
 92  #        """ 
 93  #        raise NotImplementedError('Abstract method "phi" must be ' 
 94  #                                  'specialised!') 
 95  # 
 96  #    def grad(self, beta): 
 97  #        """ Gradient of the function at beta. 
 98  # 
 99  #        Parameters 
100  #        ---------- 
101  #        beta : Numpy array. The point at which to evaluate the gradient. 
102  #        """ 
103  #        if self.l < consts.TOLERANCE: 
104  #            return 0.0 
105  # 
106  #        # \beta need not be sliced here. 
107  #        alpha = self.alpha(beta) 
108  # 
109  #        if self.penalty_start > 0: 
110  #            grad = self.l * np.vstack((np.zeros((self.penalty_start, 1)), 
111  #                                       self.Aa(alpha))) 
112  #        else: 
113  #            grad = self.l * self.Aa(alpha) 
114  # 
115  ##        approx_grad = utils.approx_grad(self.f, beta, eps=1e-6) 
116  ##        print "NesterovFunction:", maths.norm(grad - approx_grad) 
117  # 
118  #        return grad 
119  # 
120  #    def get_mu(self): 
121  #        """Return the regularisation constant for the smoothing. 
122  #        """ 
123  #        return self.mu 
124  # 
125  #    def set_mu(self, mu): 
126  #        """Set the regularisation constant for the smoothing. 
127  # 
128  #        Parameters 
129  #        ---------- 
130  #        mu : Non-negative float. The regularisation constant for the smoothing 
131  #                to use from now on. 
132  # 
133  #        Returns 
134  #        ------- 
135  #        old_mu : Non-negative float. The old regularisation constant for the 
136  #                smoothing that was overwritten and no longer is used. 
137  #        """ 
138  #        old_mu = self.get_mu() 
139  # 
140  #        self.mu = mu 
141  # 
142  #        return old_mu 
143  # 
144  #    def alpha(self, beta): 
145  #        """ Dual variable of the Nesterov function. 
146  # 
147  #        Parameters 
148  #        ---------- 
149  #        beta : Numpy array (p-by-1). The variable for which to compute the dual 
150  #                variable alpha. 
151  #        """ 
152  #        if self.penalty_start > 0: 
153  #            beta_ = beta[self.penalty_start:, :] 
154  #        else: 
155  #            beta_ = beta 
156  # 
157  #        A = self.A() 
158  #        mu = self.get_mu() 
159  #        alpha = [0] * len(A) 
160  #        for i in xrange(len(A)): 
161  #            alpha[i] = A[i].dot(beta_) / mu 
162  # 
163  #        # Apply projection 
164  #        alpha = self.project(alpha) 
165  # 
166  #        return alpha 
167  # 
168  #    def A(self): 
169  #        """ Linear operator of the Nesterov function. 
170  #        """ 
171  #        return self._A 
172  # 
173  #    def Aa(self, alpha): 
174  #        """ Compute A'*alpha. 
175  # 
176  #        Parameters 
177  #        ---------- 
178  #        alpha : Numpy array (x-by-1). The dual variable alpha. 
179  #        """ 
180  #        A = self.A() 
181  #        Aa = A[0].T.dot(alpha[0]) 
182  #        for i in xrange(1, len(A)): 
183  #            Aa += A[i].T.dot(alpha[i]) 
184  # 
185  #        return Aa 
186  # 
187  #    @abc.abstractmethod 
188  #    def project(self, alpha): 
189  #        """ Projection onto the compact space of the Nesterov function. 
190  # 
191  #        Parameters 
192  #        ---------- 
193  #        alpha : Numpy array (x-by-1). The not-yet-projected dual variable 
194  #                alpha. 
195  #        """ 
196  #        raise NotImplementedError('Abstract method "project" must be ' 
197  #                                  'specialised!') 
198  # 
199  #    @abc.abstractmethod 
200  #    def M(self): 
201  #        """ The maximum value of the regularisation of the dual variable. We 
202  #        have 
203  # 
204  #            M = max_{alpha in K} 0.5*|alpha|²_2. 
205  #        """ 
206  #        raise NotImplementedError('Abstract method "M" must be ' 
207  #                                  'specialised!') 
208  # 
209  #    @abc.abstractmethod 
210  #    def estimate_mu(self, beta): 
211  #        """ Compute a "good" value of mu with respect to the given beta. 
212  # 
213  #        Parameters 
214  #        ---------- 
215  #        beta : Numpy array (p-by-1). The primal variable at which to compute a 
216  #                feasible value of mu. 
217  #        """ 
218  #        raise NotImplementedError('Abstract method "estimate_mu" must be ' 
219  #                                  'specialised!') 
220