
Source Code for Module parsimony.datasets.simulate.l1mu_l2_tvmu

# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 14:47:48 2013

Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.

@author:  Tommy Löfstedt
@email:   tommy.loefstedt@cea.fr
@license: BSD 3-clause.
"""
import numpy as np

from .grad import grad_l1mu
from .grad import grad_l2_squared
from .grad import grad_tvmu
from .utils import bisection_method

__all__ = ['load']


def load(l, k, g, beta, M, e, mu, snr=None, shape=None):
    """Returns data generated such that we know the exact solution.

    The data generated by this function are fitted to the linear regression
    + L1 + L2 + smoothed total variation function, i.e. to:

        f(b) = (1 / 2).|Xb - y|² + l.L1mu(b) + (k / 2).|b|² + g.TVmu(b),

    where L1mu is the smoothed L1 norm, |.|² is the squared L2 norm and TVmu
    is the smoothed total variation penalty.

    Parameters
    ----------
    l : The L1 regularisation parameter.

    k : The L2 regularisation parameter.

    g : The total variation regularisation parameter.

    beta : The regression vector to generate data from.

    M : The matrix to use when building the data. This matrix carries the
            desired correlation structure of the generated data. The
            generated data will be a column-scaled version of this matrix.

    e : The error vector e = Xb - y. This vector carries the desired
            distribution of the residual.

    mu : The Nesterov smoothing regularisation parameter.

    snr : The signal-to-noise ratio between the model and the residual.

    shape : The underlying dimensions of the regression vector, beta. E.g.
            beta may represent an underlying 3D image. In that case the
            shape is a three-tuple with dimensions (Z, Y, X). If shape is
            not given, it is set to (p,), where p is the dimension of beta.

    Returns
    -------
    X : The generated X matrix.

    y : The generated y vector.

    beta : The regression vector, scaled to give the requested snr.
    """
    l = float(l)
    k = float(k)
    g = float(g)

    if shape is None:
        shape = (beta.shape[0],)

    if snr is not None:
        # Find the scaling factor for beta that gives the requested
        # signal-to-noise ratio, |X.b| / |e| = snr.
        def f(x):
            X, y = _generate(l, k, g, x * beta, M, e, mu, shape)

            # print("norm(beta) = ", np.linalg.norm(beta))
            # print("norm(Xbeta) = ", np.linalg.norm(np.dot(X, beta)))
            # print("norm(e) = ", np.linalg.norm(e))

            print("snr = %.5f = %.5f = |X.b| / |e| = %.5f / %.5f"
                  % (snr,
                     np.linalg.norm(np.dot(X, x * beta))
                         / np.linalg.norm(e),
                     np.linalg.norm(np.dot(X, x * beta)),
                     np.linalg.norm(e)))

            return (np.linalg.norm(np.dot(X, x * beta))
                        / np.linalg.norm(e)) - snr

        snr = bisection_method(f, low=0.0, high=np.sqrt(snr), maxiter=30)

        beta = beta * snr

    X, y = _generate(l, k, g, beta, M, e, mu, shape)

    return X, y, beta
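A minimal usage sketch (an editorial illustration, not part of the original module; the sizes, seed and parameter values are assumptions). The shapes follow the code: M is an n-by-p matrix carrying the correlation structure, while beta and e are column vectors, since _generate below indexes alpha[i, 0] and computes np.dot(M.T, e).

    import numpy as np
    from parsimony.datasets.simulate.l1mu_l2_tvmu import load

    np.random.seed(42)
    n, p = 50, 30
    M = np.random.randn(n, p)      # desired correlation structure of the data
    e = np.random.randn(n, 1)      # desired residual, e = Xb - y
    beta = np.random.rand(p, 1)    # regression vector to generate data from

    X, y, beta = load(l=0.1, k=0.5, g=1.0, beta=beta, M=M, e=e, mu=5e-3)

Passing snr as well would additionally rescale beta, via the bisection above, so that |X.b| / |e| equals the requested signal-to-noise ratio. Note also that _generate divides elementwise by np.dot(M.T, e), so M and e should be chosen such that no entry of that product is close to zero.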
def _generate(l, k, g, beta, M, e, mu, shape):

    p = np.prod(shape)

    # Gradients of the three (smoothed) penalties at beta.
    gradL1mu = grad_l1mu(beta, mu)
    gradL2 = grad_l2_squared(beta)
    gradTVmu = grad_tvmu(beta, shape, mu)

    # Scale the columns of M such that X.T.dot(e) exactly cancels the
    # weighted penalty gradients, making beta a stationary point of f.
    alpha = -(l * gradL1mu + k * gradL2 + g * gradTVmu)
    alpha = np.divide(alpha, np.dot(M.T, e))

    X = np.zeros(M.shape)
    for i in range(p):
        X[:, i] = M[:, i] * alpha[i, 0]

    # Define y such that the residual Xb - y is exactly e.
    y = np.dot(X, beta) - e

    return X, y
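A hedged check of the construction (an editorial addition; it assumes the sibling grad module is importable as parsimony.datasets.simulate.grad, and reuses X, y, beta, p and the parameter values from the sketch above). At the returned beta, the gradient of the data-fit term, X.T.dot(X.dot(beta) - y) = X.T.dot(e), was built by _generate to cancel the weighted penalty gradients, so the full smoothed objective f has (numerically) zero gradient at beta, which is what makes beta the known exact solution.

    from parsimony.datasets.simulate.grad import (grad_l1mu, grad_l2_squared,
                                                  grad_tvmu)

    grad_smooth = np.dot(X.T, np.dot(X, beta) - y)     # equals np.dot(X.T, e)
    grad_penalty = (0.1 * grad_l1mu(beta, 5e-3)
                    + 0.5 * grad_l2_squared(beta)
                    + 1.0 * grad_tvmu(beta, (p,), 5e-3))
    print(np.linalg.norm(grad_smooth + grad_penalty))  # ~0, up to rounding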