From fb18dc29b034429a25a52d2a6fb6b176bb777e36 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Fri, 2 Mar 2018 16:12:29 -0500 Subject: [PATCH 01/84] initial commit of matrix normal base API , regression, and MNRSA --- brainiak/matnormal/__init__.py | 188 +++++++ brainiak/matnormal/covs.py | 524 ++++++++++++++++++ brainiak/matnormal/matnormal_likelihoods.py | 228 ++++++++ brainiak/matnormal/mnrsa.py | 163 ++++++ brainiak/matnormal/regression.py | 150 +++++ brainiak/matnormal/utils.py | 65 +++ brainiak/utils/utils.py | 286 ++++++++++ examples/matnormal/MN-RSA.ipynb | 384 +++++++++++++ tests/matnormal/test_cov.py | 296 ++++++++++ tests/matnormal/test_matnormal_logp.py | 56 ++ .../test_matnormal_logp_conditional.py | 87 +++ .../matnormal/test_matnormal_logp_marginal.py | 75 +++ tests/matnormal/test_matnormal_regression.py | 105 ++++ tests/matnormal/test_matnormal_rsa.py | 81 +++ 14 files changed, 2688 insertions(+) create mode 100644 brainiak/matnormal/__init__.py create mode 100644 brainiak/matnormal/covs.py create mode 100644 brainiak/matnormal/matnormal_likelihoods.py create mode 100644 brainiak/matnormal/mnrsa.py create mode 100644 brainiak/matnormal/regression.py create mode 100644 brainiak/matnormal/utils.py create mode 100644 examples/matnormal/MN-RSA.ipynb create mode 100644 tests/matnormal/test_cov.py create mode 100644 tests/matnormal/test_matnormal_logp.py create mode 100644 tests/matnormal/test_matnormal_logp_conditional.py create mode 100644 tests/matnormal/test_matnormal_logp_marginal.py create mode 100644 tests/matnormal/test_matnormal_regression.py create mode 100644 tests/matnormal/test_matnormal_rsa.py diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py new file mode 100644 index 000000000..b095ed601 --- /dev/null +++ b/brainiak/matnormal/__init__.py @@ -0,0 +1,188 @@ +from .mnrsa import MNRSA +from .regression import MatnormRegression + +"""The matrix variate normal distribution, with conditional and marginal identities +========================================================================================== + +.. math:: + \\DeclareMathOperator{\\Tr}{Tr} + \\newcommand{\\trp}{{^\\top}} % transpose + \\newcommand{\\trace}{\\text{Trace}} % trace + \\newcommand{\\inv}{^{-1}} + \\newcommand{\\mb}{\\mathbf{b}} + \\newcommand{\\M}{\\mathbf{M}} + \\newcommand{\\C}{\\mathbf{C}} + \\newcommand{\\G}{\\mathbf{G}} + \\newcommand{\\A}{\\mathbf{A}} + \\newcommand{\\R}{\\mathbf{R}} + \\renewcommand{\\S}{\\mathbf{S}} + \\newcommand{\\B}{\\mathbf{B}} + \\newcommand{\\Q}{\\mathbf{Q}} + \\newcommand{\\mH}{\\mathbf{H}} + \\newcommand{\\U}{\\mathbf{U}} + \\newcommand{\\mL}{\\mathbf{L}} + \\newcommand{\\diag}{\\mathrm{diag}} + \\newcommand{\\etr}{\\mathrm{etr}} + \\renewcommand{\\H}{\\mathbf{H}} + \\newcommand{\\vecop}{\\mathrm{vec}} + \\newcommand{\\I}{\\mathbf{I}} + \\newcommand{\\X}{\\mathbf{X}} + \\newcommand{\\Y}{\\mathbf{Y}} + \\newcommand{\\Z}{\\mathbf{Z}} + \\renewcommand{\\L}{\\mathbf{L}} + + +The matrix-variate normal distribution is a generalization to matrices of the +normal distribution. Another name for it is the multivariate normal distribution +with kronecker separable covariance. The distributional intuition is as follows +if :math:`X \\sim \\mathcal{MN}(M,R,C)` then +:math:`\\mathrm{vec}(X)\\sim\\mathcal{N}(\\mathrm{vec}(M), C \\otimes R)`, +where :math:`\\mathrm{vec}(\\cdot)` is the vectorization operator and +:math:`otimes` is the Kronecker product. 
If we think of X as a matrix of TRs by +voxels in the fMRI setting, then this model assumes that each voxel has the same +TR-by-TR covariance structure (represented by the matrix R), and each volume has +the same spatial covariance (represented by the matrix C). This assumption +allows us to model both covariances separately. We can assume that the spatial +covariance itself is kronecker-structured, which implies that the spatial +covariance of voxels is the same in the X, Y and Z dimensions. + +The log-likelihood for the matrix-normal density is: + +.. math:: + \\log p(X\\mid \\M,\\R, \\C) = -2\\log mn - m \\log|\\C| - n \\log|\\R| - \\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right] + + +Here :math:`X` and :math:`M` are both :math:`m\\times n` matrices, :math:`\\R` +is :math:`m\\times m` and :math:`\\C` is :math:`n\\times n`. + +The `brainiak.matnormal` package provides structure to infer models that +can be stated in the matrix-normal notation that are useful for fMRI analysis. +It provides a few interfaces. `MatnormModelBase` is intended to be subclasses +from by matrix-variate models. It provides a wrapper for the tensorflow +optimizer that provides convergence checks based on thresholds on the function +value and gradient, and simple verbose outputs. It also provides an interface +for noise covariances (`CovBase`). Any class that follows the interface +can be used as a noise covariance in any of the matrix normal models. The +package includes a variety of noise covariances to work with, as well as an +interface to use any of the kernels in the `GPflow` package. + +Matrix normal marginals +------------------------- + +Here we extend the multivariate gaussian marginalization identity to matrix +normals. This is used in a number of the models in the package. Below, we +use lowercase subscripts for sizes to make dimensionalities easier to track. +Uppercase subscripts for covariances help keep track where they come from. + +.. math:: + \\mathbf{X}_{ij} \\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{\\mathbf{X}i},\\Sigma_{\\mathbf{X}j})\\\\ + \\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{B}_{jk}, \\Sigma_{\\mathbf{Y}j},\\Sigma_{\\mathbf{Y}k})\\\\ + \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{Y}_{jk} + \\mathbf{C}_{ik}, \\Sigma_{\\mathbf{Z}_i}, \\Sigma_{\\mathbf{Z}_k})\\\\ + + +We vectorize, and covert to a form we recognize as $y \\sim \\mathcal{N}(Mx+b, \\Sigma)$. + +.. math:: + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{Y}_{jk}+\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\ + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{Y}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\ + + +Now we can use our standard gaussian marginalization identity: + +.. math:: + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i} + (\\I_k\\otimes\\X_{ij})(\\Sigma_{\\mathbf{Y}_k}\\otimes\\Sigma_{\\mathbf{Y}_j})(\\I_k\\otimes\\X_{ij})\\trp )\\\\ + + +Collect terms using the mixed-product property of kronecker products: + +.. 
math:: + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{\\mathbf{Y}_k}\\otimes \\X_{ij}\\Sigma_{\\mathbf{Y}_j}\\X_{ij}\\trp) + + +Now, we can see that the marginal density is a matrix-variate normal only if +:math:`\\Sigma_{\\mathbf{Z}_k}= \\Sigma_{\\mathbf{Y}_k}` -- that is, the +variable we're marginalizing over has the same covariance in the dimension +we're *not* marginalizing over as the marginal density. Otherwise the densit +is well-defined but the covariance retains its kronecker structure. So we let +:math:`\\Sigma_k:=\\Sigma_{\\mathbf{Z}_k}= \\Sigma_{\\mathbf{Y}_k}`, factor, +and transform it back into a matrix normal: + +.. math:: + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{_k}\\otimes \\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp)\\\\ + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{k}\\otimes(\\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp))\\\\ + \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij} \\sim \\mathcal{MN}(\\X\\mathbf{B}_{jk} + \\mathbf{C}_{ik}, \\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp,\\Sigma_{k}) + + +We can do it in the other direction as well, because if +:math:`\\X \\sim \\mathcal{MN}(M, U, V)` then :math:`\\X\\trp \\sim \\mathcal{MN}(M\\trp, V, U)`: + +.. math:: + \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{Y}_{jk}\\trp\\mathbf{X}_{ij}\\trp + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k},\\Sigma_{\\mathbf{Z}_i})\\\\ + \\mbox{let } \\Sigma_i := \\Sigma_{\\mathbf{Z}_i}=\\Sigma_{\\mathbf{X}_i} \\\\ + \\cdots\\\\ + \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{A}_{jk}\\trp\\mathbf{X}_{ij}\\trp + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k} + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y,\\Sigma_{\\mathbf{Z}_i})\\\\ + \\mathbf{Z}_{ik}\\mid\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{A}_{jk}+ \\mathbf{C}_{ik},\\Sigma_{\\mathbf{Z}_i},\\Sigma_{\\mathbf{Z}_k} + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y) + +These marginal likelihoods are implemented relatively efficiently in +`MatnormModelBase.matnorm_logp_marginal_row` and +`MatnormModelBase.matnorm_logp_marginal_col`. + +Partitioned matrix normal conditionals +-------------------------------------------------- + +Here we extend the multivariate gaussian conditional identity to matrix normals. +This is used for prediction in some models. Below, we use lowercase subscripts +for sizes to make dimensionalities easier to track. Uppercase subscripts for +covariances help keep track where they come from. + + +Next, we do the same for the partitioned gaussian identity. First two vectorized +matrix-normals that form our partition: + +.. 
math:: + \\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{i}, \\Sigma_{j}) \\rightarrow \\vecop[\\mathbf{X}_{ij}] \\sim \\mathcal{N}(\\vecop[\\mathbf{A}_{ij}], \\Sigma_{j}\\otimes \\Sigma_{i})\\\\ + \\mathbf{Y}_{ik} &\\sim \\mathcal{MN}(\\mathbf{B}_{ik}, \\Sigma_{i}, \\Sigma_{k}) \\rightarrow \\vecop[\\mathbf{Y}_{ik}] \\sim \\mathcal{N}(\\vecop[\\mathbf{B}_{ik}], \\Sigma_{k}\\otimes \\Sigma_{i})\\\\ + \\begin{bmatrix}\\vecop[\\mathbf{X}_{ij}] \\\\ \\vecop[\\mathbf{Y}_{ik}] + \\end{bmatrix} + & \\sim \\mathcal{N}\\left(\\vecop\\begin{bmatrix}\\mathbf{A}_{ij} \\\\ \\mathbf{B}_{ik} + \\end{bmatrix} + , \\begin{bmatrix} \\Sigma_{j}\\otimes \\Sigma_i & \\Sigma_{jk} \\otimes \\Sigma_i \\\\ + \\Sigma_{kj}\\otimes \\Sigma_i & \\Sigma_{k} \\otimes \\Sigma_i\\end{bmatrix}\\right) + +We apply the standard partitioned Gaussian identity and simplify using the +properties of the :math:`\\vecop` operator and the mixed product property +of kronecker products: + +.. math:: + \\vecop[\\X_{ij}] \\mid \\vecop[\\Y_{ik}]\\sim\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\otimes\\Sigma_i)(\\Sigma_k\\inv\\otimes\\Sigma_i\\inv)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]),\\\\ + & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\otimes\\Sigma_i)(\\Sigma_k\\inv\\otimes\\Sigma_i\\inv) (\\Sigma_{kj}\\otimes\\Sigma_i))\\\\ + =\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\Sigma_i\\Sigma_i\\inv)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\ + & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes\\Sigma_i\\Sigma_i\\inv \\Sigma_i))\\\\ + =\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\I)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\ + & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes\\Sigma_i)\\\\ + =\\mathcal{N}(&\\vecop[\\A_{ij}] + \\vecop[\\Y_{ik}-\\B_{ik}\\Sigma_k\\inv\\Sigma_{kj}], (\\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj})\\otimes\\Sigma_i) + + +Next, we recognize that this multivariate gaussian is equivalent to the +following matrix variate gaussian: + +.. math:: + \\X_{ij} \\mid \\Y_{ik}\\sim \\mathcal{MN}(&\\A_{ij} +(\\Y_{ik}-\\B_{ik})\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i, \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}) + +The conditional in the other direction can be written by working through the +same algebra: + +.. math:: + \\Y_{ik} \\mid \\X_{ij}\\sim \\mathcal{MN}(&\\B_{ik} +(\\X_{ij}-\\A_{ij})\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i, \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}) + +Finally, vertical rather than horizontal concatenation (yielding a partitioned +row rather than column covariance) can be written by recognizing the behavior +of the matrix normal under transposition: + +.. math:: + \\X\\trp_{ji} \\mid \\Y\\trp_{ki}\\sim \\mathcal{MN}(&\\A\\trp_{ji} +\\Sigma_{jk}\\Sigma_k\\inv(\\Y\\trp_{ki}-\\B\\trp_{ki}), \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i)\\\\ + \\Y\\trp_{ki} \\mid \\X\\trp_{ji}\\sim \\mathcal{MN}(&\\B\\trp_{ki} +\\Sigma_{kj}\\Sigma_j\\inv(\\X\\trp_{ji}-\\A\\trp_{ji}), \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i) + +These conditional likelihoods are implemented relatively efficiently in `MatnormModelBase.matnorm_logp_conditional_row` and `MatnormModelBase.matnorm_logp_conditional_col`. 
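+
+As a quick numerical sanity check of the vectorization identity above, here is a
+minimal sketch using only numpy and scipy (the variable names below are
+illustrative and not part of this package)::
+
+    import numpy as np
+    from scipy.stats import multivariate_normal
+
+    m, n = 4, 3
+    R = 2.0 * np.eye(m)        # row (TR-by-TR) covariance
+    C = 0.5 * np.eye(n)        # column (spatial) covariance
+    M = np.zeros((m, n))
+    X = np.random.randn(m, n)
+
+    # vec(X) ~ N(vec(M), kron(C, R)); vec stacks columns, hence order='F'
+    logp = multivariate_normal.logpdf(X.flatten(order='F'),
+                                      M.flatten(order='F'),
+                                      np.kron(C, R))
+
+By the identity above, this equals the matrix-normal log-density of X with row
+covariance R and column covariance C, up to floating point error.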
+ +""" diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py new file mode 100644 index 000000000..4990c9136 --- /dev/null +++ b/brainiak/matnormal/covs.py @@ -0,0 +1,524 @@ +import tensorflow as tf +import numpy as np +import abc +import scipy.linalg +import scipy.sparse +from tensorflow.contrib.distributions import InverseGamma, WishartCholesky +from brainiak.matnormal.utils import define_scope, xx_t +from brainiak.utils.utils import tf_solve_lower_triangular_kron,\ + tf_solve_upper_triangular_kron, \ + tf_solve_lower_triangular_masked_kron, \ + tf_solve_upper_triangular_masked_kron + +__all__ = ['CovBase', + 'CovIdentity', + 'CovAR1', + 'CovIsotropic', + 'CovDiagonal', + 'CovDiagonalGammaPrior', + 'CovUnconstrainedCholesky', + 'CovUnconstrainedCholeskyWishartReg', + 'CovUnconstrainedInvCholesky', + 'CovKroneckerFactored'] + + +class CovBase: + """Base metaclass for noise covariances + """ + __metaclass__ = abc.ABCMeta + + def __init__(self, size): + self.size = size + + @abc.abstractmethod + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to fit + this covariance + """ + pass + + @abc.abstractproperty + def logdet(self): + """ log|Sigma| + """ + pass + + @abc.abstractmethod + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + pass + + @define_scope + def Sigma(self): + """return Sigma + """ + return tf.matrix_inverse(self.Sigma_inv) + + @define_scope + def Sigma_inv(self): + """ Sigma^{-1}. Override me with more efficient implementation in subclasses + """ + return self.Sigma_inv_x(tf.diag(tf.ones([self.size], + dtype=tf.float64))) + + @define_scope + def logp(self): + """ Log-likelihood of this covariance (useful for regularization) + """ + return tf.constant(0, dtype=tf.float64) + + +class CovTFWrap(CovBase): + """ thin wrapper around a TF tensor + """ + def __init__(self, Sigma): + + self.L = tf.cholesky(Sigma) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to fit + this covariance + """ + return [] + + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + return tf.cholesky_solve(self.L, X) + + @define_scope + def logdet(self): + return 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) + + +class CovIdentity(CovBase): + """Identity noise covariance. + """ + def __init__(self, size): + super(CovIdentity, self).__init__(size) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to + fit this covariance + """ + return [] + + @define_scope + def logdet(self): + """ log|Sigma| + """ + return tf.constant(0.0, 'float64') + + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + return X + + @define_scope + def Sigma_inv(self): + """ Sigma^{-1}. 
+ """ + return tf.diag(tf.ones([self.size], dtype=tf.float64)) + + +class CovAR1(CovBase): + """AR1 covariance + """ + def __init__(self, size, rho=None, sigma=None, scan_onsets=None): + + # Similar to BRSA trick I think + if scan_onsets is None: + self.run_sizes = [size] + self.offdiag_template = tf.constant(scipy.linalg.toeplitz(np.r_[0, + 1, np.zeros(size-2)]), dtype=tf.float64) + self.diag_template = tf.constant(np.diag(np.r_[0, np.ones(size-2), 0])) + else: + self.run_sizes = np.ediff1d(np.r_[scan_onsets, size]) + sub_offdiags = [scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(r-2)]) + for r in self.run_sizes] + self.offdiag_template = tf.constant(scipy.sparse.block_diag(sub_offdiags).toarray()) + subdiags = [np.diag(np.r_[0, np.ones(r-2), 0]) for r in self.run_sizes] + self.diag_template = tf.constant(scipy.sparse.block_diag(subdiags).toarray()) + + self.I = tf.constant(np.eye(size)) + + if sigma is None: + self.log_sigma = tf.Variable(tf.random_normal([1], dtype=tf.float64), + name="sigma") + else: + self.log_sigma = tf.Variable(np.log(sigma), name="sigma") + + if rho is None: + self.rho_unc = tf.Variable(tf.random_normal([1], dtype=tf.float64), + name="rho") + else: + self.rho_unc = tf.Variable(np.log(rho), name="rho") + + super(CovAR1, self).__init__(size) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to + fit this covariance + """ + return [self.rho_unc, self.log_sigma] + + @define_scope + def logdet(self): + """ log|Sigma| + """ + rho = 2 * tf.sigmoid(self.rho_unc) - 1 + sigma = tf.exp(self.log_sigma) + + return tf.reduce_sum(2 * tf.constant(self.run_sizes, dtype=tf.float64) * + tf.log(sigma) - tf.log(1 - tf.square(rho))) + + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + return tf.matmul(self.Sigma_inv, X) + + @define_scope + def Sigma_inv(self): + """ Sigma^{-1}. + Unlike BRSA we assume stationarity within block so no special case + for first/last element of a block. This makes constructing this + matrix easier. + reprsimil.BRSA says (I - rho1 * D + rho1**2 * F) / sigma**2 + """ + rho = 2 * tf.sigmoid(self.rho_unc) - 1 + sigma = tf.exp(self.log_sigma) + return (self.I - rho * self.offdiag_template + rho**2 * + self.diag_template) / tf.square(sigma) + + +class CovIsotropic(CovBase): + """Scaled identity (isotropic) noise covariance. + """ + + def __init__(self, size, sigma=None): + super(CovIsotropic, self).__init__(size) + if sigma is None: + self.log_sigma = tf.Variable(tf.random_normal([1], dtype=tf.float64), + name="sigma") + else: + self.log_sigma = tf.Variable(np.log(sigma), name="sigma") + + @define_scope + def sigma(self): + return tf.exp(self.log_sigma) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to fit + this covariance + """ + return [self.log_sigma] + + @define_scope + def logdet(self): + """ log|Sigma| + """ + return self.size * self.log_sigma + + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + return X / self.sigma + + @define_scope + def Sigma_inv(self): + """ Sigma^{-1}. 
+ """ + return tf.diag(tf.ones([self.size], dtype=tf.float64)) / self.sigma + + +class CovDiagonal(CovBase): + """Uncorrelated (diagonal) noise covariance + """ + def __init__(self, size, sigma=None): + super(CovDiagonal, self).__init__(size) + if sigma is None: + self.logprec = tf.Variable(tf.random_normal([size], dtype=tf.float64), + name="precisions") + else: + self.logprec = tf.Variable(np.log(1/sigma), name="log-precisions") + + @define_scope + def prec(self): + return tf.exp(self.logprec) + + @define_scope + def prec_dimaugmented(self): + return tf.expand_dims(self.prec, -1) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to fit + this covariance + """ + return [self.logprec] + + @define_scope + def logdet(self): + """ log|Sigma| + """ + return -tf.reduce_sum(self.logprec) + + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + return tf.multiply(self.prec_dimaugmented, X) + + @define_scope + def Sigma_inv(self): + """ Sigma^{-1}. + """ + return tf.diag(tf.ones([self.size], dtype=tf.float64) * self.prec) + + +class CovDiagonalGammaPrior(CovDiagonal): + """Uncorrelated (diagonal) noise covariance + """ + def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): + super(CovDiagonalGammaPrior, self).__init__(size, sigma) + + self.ig = InverseGamma(concentration=tf.constant(alpha, dtype=tf.float64), + rate=tf.constant(beta, dtype=tf.float64)) + + @define_scope + def logp(self): + return tf.reduce_sum(self.ig.log_prob(self.prec)) + + +class CovUnconstrainedCholesky(CovBase): + """Unconstrained noise covariance parameterized in terms of its cholesky + """ + + def __init__(self, size, Sigma=None): + super(CovUnconstrainedCholesky, self).__init__(size) + if Sigma is None: + self.L_full = tf.Variable(tf.random_normal([size, size], + dtype=tf.float64), + name="L_full", dtype="float64") + else: + # in order to respect the Sigma we got passed in, we log the diag + # which we will later exp. a little ugly but this is a rare use case + L = np.linalg.cholesky(Sigma) + L[np.diag_indices_from(L)] = np.log(np.diag(L)) + self.L_full = tf.Variable(L, name="L_full", + dtype="float64") + + @define_scope + def L(self): + """ Zero out triu of L_full to get cholesky L. + This seems dumb but TF is smart enough to set the gradient to zero + for those elements, and the alternative (fill_lower_triangular from + contrib.distributions) is inefficient and recommends not doing the + packing (for now). + Also: to make the parameterization unique we exp the diagonal so + it's positive. + """ + L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) + return tf.matrix_set_diag(L_indeterminate, + tf.exp(tf.matrix_diag_part(L_indeterminate))) + + @define_scope + def Sigma(self): + """ covariance + """ + return xx_t(self.L) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to fit + this covariance + """ + return [self.L_full] + + @define_scope + def logdet(self): + """ log|Sigma| using a cholesky solve + """ + return 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) + + def Sigma_inv_x(self, X): + """ + Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using + cholesky solve + """ + return tf.cholesky_solve(self.L, X) + + +class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): + """Unconstrained noise covariance parameterized in terms of its cholesky factor. + Regularized using the trick from Chung et al. 
2015 such that as the + covariance approaches singularity, the likelihood goes to 0. + """ + + def __init__(self, size, Sigma=None): + super(CovUnconstrainedCholeskyWishartReg, self).__init__(size) + self.wishartReg = WishartCholesky(df=tf.constant(size+2, dtype=tf.float64), + scale=tf.constant(1e5 * np.eye(size), + dtype=tf.float64)) + + @define_scope + def logp(self): + """ Log-likelihood of this covariance + """ + # l = self.wishartReg.log_prob(self.Sigma) + # l = tf.Print(l, [self.Sigma], 'sigma') + # l = tf.Print(l, [tf.self_adjoint_eigvals(self.L)], 'eigs') + return self.wishartReg.log_prob(self.Sigma) + + +class CovUnconstrainedInvCholesky(CovBase): + """Unconstrained noise covariance parameterized in terms of its precision cholesky + """ + + def __init__(self, size, invSigma=None): + if invSigma is None: + self.Linv_full = tf.Variable(tf.random_normal([size, size], + dtype=tf.float64), name="Linv_full") + else: + self.Linv_full = tf.Variable(np.linalg.cholesky(invSigma), name="Linv_full") + + super(CovUnconstrainedInvCholesky, self).__init__(size) + + @define_scope + def Linv(self): + """ Zero out triu of L_full to get cholesky L. + This seems dumb but TF is smart enough to set the gradient to zero + for those elements, and the alternative (fill_lower_triangular from + contrib.distributions) is inefficient and recommends not doing the + packing (for now). + Also: to make the parameterization unique we log the diagonal so + it's positive. + """ + L_indeterminate = tf.matrix_band_part(self.Linv_full, -1, 0) + return tf.matrix_set_diag(L_indeterminate, + tf.exp(tf.matrix_diag_part(L_indeterminate))) + + @define_scope + def Sigma(self): + """ cov + """ + return tf.matrix_inverse(self.Sigma_inv) + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized to fit + this covariance + """ + return [self.Linv_full] + + @define_scope + def logdet(self): + """ log|Sigma| using a cholesky solve + """ + return -2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) + + def Sigma_inv_x(self, X): + """ + Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using + cholesky solve + """ + return tf.matmul(xx_t(self.Linv), X) + + +class CovKroneckerFactored(CovBase): + """ Kronecker product noise covariance parameterized in terms + of its component cholesky factors + """ + + def __init__(self, sizes, Sigmas=None, mask=None): + """Initialize the kronecker factored covariance object. + + Arguments + --------- + sizes : list + List of dimensions (int) of the factors + E.g. ``sizes = [2, 3]`` will create two factors of + sizes 2x2 and 3x3 giving us a 6x6 dimensional covariance + Sigmas : list (default : None) + Initial guess for the covariances. List of positive definite + covariance matrices the same sizes as sizes. 
+ mask : int array (default : None) + 1-D tensor with length equal to product of sizes with 1 for + valid elements and 0 for don't care + + Returns + ------- + None + + Raises + ------ + TypeError + If sizes is not a list + """ + if not isinstance(sizes, list): + raise TypeError('sizes is not a list') + + self.sizes = sizes + self.nfactors = len(sizes) + self.size = np.prod(np.array(sizes), dtype=np.int32) + + if Sigmas is None: + self.L_full = [tf.Variable(tf.random_normal([sizes[i], sizes[i]], + dtype=tf.float64), name="L"+str(i)+"_full") + for i in range(self.nfactors)] + else: + self.L_full = [tf.Variable(np.linalg.cholesky(Sigmas[i]), + name="L"+str(i)+"_full") for i in range(self.nfactors)] + self.mask = mask + + @define_scope + def L(self): + """ Zero out triu of all factors in L_full to get cholesky L. + This seems dumb but TF is smart enough to set the gradient to + zero for those elements, and the alternative + (fill_lower_triangular from contrib.distributions) + is inefficient and recommends not doing the packing (for now). + Also: to make the parameterization unique we log the diagonal + so it's positive. + """ + L_indeterminate = [tf.matrix_band_part(mat, -1, 0) + for mat in self.L_full] + return [tf.matrix_set_diag(mat, tf.exp(tf.matrix_diag_part(mat))) + for mat in L_indeterminate] + + def get_optimize_vars(self): + """ Returns a list of tf variables that need to get optimized + to fit this covariance + """ + return self.L_full + + @define_scope + def logdet(self): + """ log|Sigma| using the diagonals of the cholesky factors. + """ + if self.mask is None: + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in self.L]) + n_prod = tf.reduce_prod(n_list) + logdet = tf.stack([tf.reduce_sum(tf.log(tf.diag_part(mat))) + for mat in self.L]) + logdetfinal = tf.reduce_sum((logdet*n_prod)/n_list) + else: + n_list = [tf.shape(mat)[0] for mat in self.L] + mask_reshaped = tf.reshape(self.mask, n_list) + logdet = 0.0 + for i in range(self.nfactors): + indices = list(range(self.nfactors)) + indices.remove(i) + logdet += tf.log(tf.diag_part(self.L[i])) * tf.to_double(tf.reduce_sum(mask_reshaped, indices)) + logdetfinal = tf.reduce_sum(logdet) + return (2.0*logdetfinal) + + def Sigma_inv_x(self, X): + """ Given this Sigma and some X, compute Sigma^{-1} * x using + traingular solves with the cholesky factors. + Do 2 triangular solves - L L^T x = y as L z = y and L^T x = z + """ + if self.mask is None: + z = tf_solve_lower_triangular_kron(self.L, X) + x = tf_solve_upper_triangular_kron(self.L, z) + else: + z = tf_solve_lower_triangular_masked_kron(self.L, X, self.mask) + x = tf_solve_upper_triangular_masked_kron(self.L, z, self.mask) + return x diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py new file mode 100644 index 000000000..c6398d8f7 --- /dev/null +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -0,0 +1,228 @@ +import tensorflow as tf +from .utils import scaled_I +import logging + +logger = logging.getLogger(__name__) + + +def _condition(X): + s = tf.svd(X, compute_uv=False) + return tf.reduce_max(s)/tf.reduce_min(s) + + +def solve_det_marginal(x, sigma, A, Q): + """ + Use matrix inversion lemma for the solve: + .. 
math:: + (\Sigma + AQA')^{-1} X =\\ + \Sigma^{-1} - \Sigma^{-1} A (Q^{-1} + A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} + + Use matrix determinant lemma for determinant: + ..math:: + \log|(\Sigma + AQA')| = \log|Q^{-1} + A' \Sigma^{-1} A| + \log|Q| + \log|\Sigma| + """ + + # we care about condition number of i_qf + if logging.getLogger().isEnabledFor(logging.DEBUG): + A = tf.Print(A, [_condition(Q.Sigma_inv + tf.matmul(A, + sigma.Sigma_inv_x(A), transpose_a=True))], 'i_qf condition') + # since the sigmas expose only inverse, we invert their + # conditions to get what we want + A = tf.Print(A, [1/_condition(Q.Sigma_inv)], 'Q condition') + A = tf.Print(A, [1/_condition(sigma.Sigma_inv)], 'sigma condition') + A = tf.Print(A, [tf.reduce_max(A), tf.reduce_min(A)], 'A minmax') + + # cholesky of (Qinv + A' Sigma^{-1} A) + i_qf_cholesky = tf.cholesky(Q.Sigma_inv + tf.matmul(A, + sigma.Sigma_inv_x(A), transpose_a=True)) + + logdet = Q.logdet + sigma.logdet +\ + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(i_qf_cholesky))) + + if logging.getLogger().isEnabledFor(logging.DEBUG): + logdet = tf.Print(logdet, [Q.logdet], 'Q logdet') + logdet = tf.Print(logdet, [sigma.logdet], 'sigma logdet') + logdet = tf.Print(logdet, [2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(i_qf_cholesky)))], + 'iqf logdet') + + # A' Sigma^{-1} + Atrp_Sinv = tf.matmul(A, sigma.Sigma_inv, transpose_a=True) + # (Qinv + A' Sigma^{-1} A)^{-1} A' Sigma^{-1} + prod_term = tf.cholesky_solve(i_qf_cholesky, Atrp_Sinv) + + solve = tf.matmul(sigma.Sigma_inv_x(scaled_I(1.0, sigma.size) - + tf.matmul(A, prod_term)), x) + + return solve, logdet + + +def solve_det_conditional(x, sigma, A, Q): + """ + Use matrix inversion lemma for the solve: + .. math:: + (\Sigma - AQ^{-1}A')^{-1} X =\\ + \Sigma^{-1} + \Sigma^{-1} A (Q - A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} X + + Use matrix determinant lemma for determinant: + ..math:: + \log|(\Sigma - AQ^{-1}A')| = \log|Q - A' \Sigma^{-1} A| - \log|Q| + \log|\Sigma| + """ + + # (Q - A' Sigma^{-1} A) + i_qf_cholesky = tf.cholesky(Q.Sigma - tf.matmul(A, + sigma.Sigma_inv_x(A), transpose_a=True)) + + logdet = -Q.logdet + sigma.logdet +\ + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(i_qf_cholesky))) + + # A' Sigma^{-1} + Atrp_Sinv = tf.matmul(A, sigma.Sigma_inv, transpose_a=True) + # (Q - A' Sigma^{-1} A)^{-1} A' Sigma^{-1} + prod_term = tf.cholesky_solve(i_qf_cholesky, Atrp_Sinv) + + solve = tf.matmul(sigma.Sigma_inv_x(scaled_I(1.0, sigma.size) + + tf.matmul(A, prod_term)), x) + + return solve, logdet + + +def _mnorm_logp_internal(colsize, rowsize, logdet_row, logdet_col, + solve_row, solve_col): + """Construct logp from the solves and determinants. + """ + log2pi = 1.8378770664093453 + + if logging.getLogger().isEnabledFor(logging.DEBUG): + solve_row = tf.Print(solve_row, [tf.trace(solve_col)], 'coltrace') + solve_row = tf.Print(solve_row, [tf.trace(solve_row)], 'rowtrace') + solve_row = tf.Print(solve_row, [logdet_row], 'logdet_row') + solve_row = tf.Print(solve_row, [logdet_col], 'logdet_col') + + denominator = - rowsize * colsize * log2pi -\ + colsize * logdet_row - rowsize * logdet_col + numerator = - tf.trace(tf.matmul(solve_col, solve_row)) + return 0.5 * (numerator + denominator) + + +def matnorm_logp(x, row_cov, col_cov): + """Log likelihood for centered matrix-variate normal density. + Assumes that row_cov and col_cov follow the API defined in CovBase. 
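+
+    A minimal usage sketch (assuming the TF1-style session workflow used
+    elsewhere in this package; the data here are illustrative only)::
+
+        import numpy as np
+        import tensorflow as tf
+        from brainiak.matnormal.covs import CovIdentity
+
+        X = tf.constant(np.random.randn(5, 3))  # float64 by default
+        logp = matnorm_logp(X, CovIdentity(size=5), CovIdentity(size=3))
+        with tf.Session() as sess:
+            # with identity covariances this is the log-density of X under
+            # iid standard-normal entries
+            print(sess.run(logp))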
+ """ + + rowsize = tf.cast(tf.shape(x)[0], 'float64') + colsize = tf.cast(tf.shape(x)[1], 'float64') + + # precompute sigma_col^{-1} * x' + solve_col = col_cov.Sigma_inv_x(tf.transpose(x)) + logdet_col = col_cov.logdet + + # precompute sigma_row^{-1} * x + solve_row = row_cov.Sigma_inv_x(x) + logdet_row = row_cov.logdet + + return _mnorm_logp_internal(colsize, rowsize, logdet_row, + logdet_col, solve_row, solve_col) + + +def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): + """ + Log likelihood for centered matrix-variate normal density. + Assumes that row_cov, col_cov, and marg_cov follow the API defined + in CovBase. + + When you marginalize in mnorm, you end up with a covariance S + APA', + where P is the covariance of A in the relevant dimension. + + This method exploits the matrix inversion and determinant lemmas to + construct S + APA' given the covariance API in in CovBase. + """ + rowsize = tf.cast(tf.shape(x)[0], 'float64') + colsize = tf.cast(tf.shape(x)[1], 'float64') + + solve_col = col_cov.Sigma_inv_x(tf.transpose(x)) + logdet_col = col_cov.logdet + + solve_row, logdet_row = solve_det_marginal(x, row_cov, marg, + marg_cov) + + return _mnorm_logp_internal(colsize, rowsize, logdet_row, + logdet_col, solve_row, solve_col) + + +def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): + """ + Log likelihood for centered matrix-variate normal density. Assumes that + row_cov, col_cov, and marg_cov follow the API defined in CovBase. + + When you marginalize in mnorm, you end up with a covariance S + APA', + where P is the covariance of A in the relevant dimension. + + This method exploits the matrix inversion and determinant lemmas to + construct S + APA' given the covariance API in in CovBase. + """ + rowsize = tf.cast(tf.shape(x)[0], 'float64') + colsize = tf.cast(tf.shape(x)[1], 'float64') + + solve_row = row_cov.Sigma_inv_x(x) + logdet_row = row_cov.logdet + + solve_col, logdet_col = solve_det_marginal(tf.transpose(x), + col_cov, + tf.transpose(marg), + marg_cov) + + return _mnorm_logp_internal(colsize, rowsize, logdet_row, + logdet_col, solve_row, solve_col) + + +def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): + """ + Log likelihood for centered matrix-variate normal density. Assumes that + row_cov, col_cov, and cond_cov follow the API defined in CovBase. + + When you go from joint to conditional in mnorm, you end up with a + covariance S - APA', where P is the covariance of A in the relevant + dimension. + + This method exploits the matrix inversion and determinant lemmas to + construct S - APA' given the covariance API in in CovBase. + """ + + rowsize = tf.cast(tf.shape(x)[0], 'float64') + colsize = tf.cast(tf.shape(x)[1], 'float64') + + solve_col = col_cov.Sigma_inv_x(tf.transpose(x)) + logdet_col = col_cov.logdet + + solve_row, logdet_row = solve_det_conditional(x, row_cov, cond, + cond_cov) + + return _mnorm_logp_internal(colsize, rowsize, logdet_row, + logdet_col, solve_row, solve_col) + + +def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): + """ + Log likelihood for centered matrix-variate normal density. Assumes that + row_cov, col_cov, and cond_cov follow the API defined in CovBase. + + When you go from joint to conditional in mnorm, you end up with a + covariance S - APA', where P is the covariance of A in the relevant + dimension. + + This method exploits the matrix inversion and determinant lemmas to + construct S - APA' given the covariance API in in CovBase. 
+ """ + rowsize = tf.cast(tf.shape(x)[0], 'float64') + colsize = tf.cast(tf.shape(x)[1], 'float64') + + solve_row = row_cov.Sigma_inv_x(x) + logdet_row = row_cov.logdet + + solve_col, logdet_col = solve_det_conditional(tf.transpose(x), + col_cov, + tf.transpose(cond), + cond_cov) + + return _mnorm_logp_internal(colsize, rowsize, logdet_row, + logdet_col, solve_row, solve_col) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py new file mode 100644 index 000000000..74fa445fd --- /dev/null +++ b/brainiak/matnormal/mnrsa.py @@ -0,0 +1,163 @@ +import tensorflow as tf +from sklearn.base import BaseEstimator +from sklearn.linear_model import LinearRegression +from .covs import CovIdentity +from brainiak.utils.utils import cov2corr +import numpy as np +from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row +from tensorflow.contrib.opt import ScipyOptimizerInterface +import logging + + +class MNRSA(BaseEstimator): + """ Matrix normal version of RSA. + + The goal of this analysis is to find the covariance of the mapping from + some design matrixX to the fMRI signal Y. It does so by marginalizing over + the actual mapping (i.e. averaging over the uncertainty in it), which + happens to correct a bias imposed by structure in the design matrix on the + RSA estimate (see Cai et al., NIPS 2016). + + This implementation makes different choices about two things relative to + `brainiak.reprsimil.BRSA`: + + 1. The noise covariance is assumed to be kronecker-separable. Informally, + this means that all voxels has the same temporal covariance, and all time + points have the same spatialcovariance. This is in contrast to BRSA, which + allows different temporal covariance for each voxel. On the other hand, + computational efficiencies enabled by this choice allow MNRSA to + support a richer class of space and time covariances (anything in + `brainiak.matnormal.covs`). + + 2. MNRSA does not estimate the nuisance timecourse X_0. Instead, + we expect the temporal noise covariance to capture the same property + (because when marginalizing over B_0 gives a low-rank component to the noise + covariance, something we hope to have available soon. + + For users: in general, if you are worried about voxels each having + different temporal noise structure,you should use `brainiak.reprsimil.BRSA`. + If you are worried about between-voxel correlations or temporal covaraince + structures that BRSA does not support, you should use MNRSA. + + .. math:: + Y \\sim \\mathcal{MN}(0, \\Sigma_t + XLL^{\\top}X^{\\top}+ X_0X_0^{\\top}, \\Sigma_s) + U = LL^{\\top} + + Parameters + ---------- + time_cov : subclass of CovBase + Temporal noise covariance class following CovBase interface. + space_cov : subclass of CovBase + Spatial noise covariance class following CovBase interface. + optimizer : string, Default :'L-BFGS' + Name of scipy optimizer to use. + optCtrl : dict, default: None + Dict of options for optimizer (e.g. 
{'maxiter': 100}) + + """ + + def __init__(self, time_cov, space_cov, n_nureg=5, + optimizer='L-BFGS-B', optCtrl=None): + + self.n_T = time_cov.size + self.n_V = space_cov.size + self.n_nureg = n_nureg + + self.optCtrl, self.optMethod = optCtrl, optimizer + + # placeholders for inputs + self.X = tf.placeholder(tf.float64, [self.n_T, None], name="Design") + self.Y = tf.placeholder(tf.float64, [self.n_T, self.n_V], name="Brain") + + self.X_0 = tf.Variable(tf.random_normal([self.n_T, n_nureg], + dtype=tf.float64), name="X_0") + + self.train_variables = [self.X_0] + + self.time_cov = time_cov + self.space_cov = space_cov + + self.train_variables.extend(self.time_cov.get_optimize_vars()) + self.train_variables.extend(self.space_cov.get_optimize_vars()) + + # create a tf session we reuse for this object + self.sess = tf.Session() + + def fit(self, X, y, structured_RSA_cov=None): + """ Estimate dimension reduction and cognitive model parameters + + Parameters + ---------- + X: 2d array + Brain data matrix (voxels by TRs). Y in the math + y: 2d array or vector + Behavior data matrix (behavioral obsevations by TRs). X in the math + max_iter: int, default=1000 + Maximum number of iterations to run + step: int, default=100 + Number of steps between optimizer output + restart: bool, default=True + If this is true, optimizer is restarted (e.g. for a new dataset). + Otherwise optimizer will continue from where it is now (for example + for running more iterations if the initial number was not enough). + + """ + + # self.sess.run(tf.global_variables_initializer()) + + feed_dict = {self.X: y, self.Y: X} + + self.n_c = y.shape[1] + + # initialize from naive RSA + m = LinearRegression(fit_intercept=False) + # counterintuitive given sklearn interface above: + # brain is passed in as X and design is passed in as y + m.fit(X=y, y=X) + self.naive_U_ = np.cov(m.coef_.T) + naiveRSA_L = np.linalg.cholesky(self.naive_U_) + self.naive_C_ = cov2corr(self.naive_U_) + self.L_full = tf.Variable(naiveRSA_L, name="L_full", dtype="float64") + + L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) + self.L = tf.matrix_set_diag(L_indeterminate, + tf.exp(tf.matrix_diag_part(L_indeterminate))) + + self.train_variables.extend([self.L_full]) + + self.x_stack = tf.concat([tf.matmul(self.X, self.L), self.X_0], 1) + self.sess.run(tf.global_variables_initializer(), feed_dict=feed_dict) + + optimizer = ScipyOptimizerInterface(-self.logp(), + var_list=self.train_variables, + method=self.optMethod, + options=self.optCtrl) + + if logging.getLogger().isEnabledFor(logging.INFO): + optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, + [tf.reduce_min(optimizer._packed_loss_grad)], + 'mingrad') + optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, + [tf.reduce_max(optimizer._packed_loss_grad)], + 'maxgrad') + optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, + [self.logp()], 'logp') + + optimizer.minimize(session=self.sess, feed_dict=feed_dict) + + self.L_ = self.L.eval(session=self.sess) + self.X_0_ = self.X_0.eval(session=self.sess) + self.U_ = self.L_.dot(self.L_.T) + self.C_ = cov2corr(self.U_) + + def logp(self): + """ MNRSA Log-likelihood""" + + rsa_cov = CovIdentity(size=self.n_c + self.n_nureg) + + return self.time_cov.logp + \ + self.space_cov.logp + \ + rsa_cov.logp + \ + matnorm_logp_marginal_row(self.Y, row_cov=self.time_cov, + col_cov=self.space_cov, + marg=self.x_stack, marg_cov=rsa_cov) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py new 
file mode 100644 index 000000000..54d30a81c --- /dev/null +++ b/brainiak/matnormal/regression.py @@ -0,0 +1,150 @@ +import tensorflow as tf +import numpy as np +from sklearn.base import BaseEstimator +from brainiak.matnormal.matnormal_likelihoods import matnorm_logp +from tensorflow.contrib.opt import ScipyOptimizerInterface + + +class MatnormRegression(BaseEstimator): + """ This analysis allows maximum likelihood estimation of regression models + in the presence of both spatial and temporal covariance. + + ..math:: + Y \\sim \\mathcal{MN}(X\beta, time_noise_cov, space_noise_cov) + + Parameters + ---------- + time_noise_cov : subclass of CovBase + TR noise covariance class following CovBase interface. + space_noise_cov : subclass of CovBase + Voxel noise covariance class following CovBase interface. + learnRate : real, default=0.01 + Step size for the Adam optimizer + + """ + def __init__(self, time_noise_cov, space_noise_cov, + optimizer='L-BFGS-B', optCtrl=None): + + self.optCtrl, self.optMethod = optCtrl, optimizer + + self.time_noise_cov, self.space_noise_cov = time_noise_cov, space_noise_cov + + self.n_t = time_noise_cov.size + self.n_v = space_noise_cov.size + + self.Y = tf.placeholder(tf.float64, [self.n_t, self.n_v], name="Y") + + self.X = tf.placeholder(tf.float64, [self.n_t, None], name="X") + + # create a tf session we reuse for this object + self.sess = tf.Session() + + # @define_scope + def logp(self): + """ Log likelihood of model (internal) + """ + y_hat = tf.matmul(self.X, self.beta) + resid = self.Y - y_hat + return matnorm_logp(resid, self.time_noise_cov, self.space_noise_cov) + + def fit(self, X, y): + """ Compute the regression fit. + + Parameters + ---------- + X : np.array, TRs by conditions. + Design matrix + Y : np.array, TRs by voxels. + fMRI data + voxel_pos: np.array, n_voxels by 3, default: None + Spatial positions of voxels (optional). + If provided, and if space_noise_cov is a CovGP, the positions + for computing the GP covaraince matrix. Otherwise CovGP + defaults to distances of 1 unit between all voxels. + Ignored by non-GP noise covariances. + times : np.array, TRs by 1, default:None + Timestamps of observations (optional). + If provided, and if time_noise_cov is a CovGP, the the times + for computing the GP covaraince matrix. Otherwise CovGP + defaults to distances of 1 unit between all times. + Ignored by non-GP noise covariances. + max_iter: int, default=1000 + Maximum number of iterations to run + step: int, default=100 + Number of steps between optimizer status outputs. + restart: bool, default=True + If this is true, optimizer is restarted (e.g. for a new dataset). + Otherwise optimizer will continue from where it is now (for example + for running more iterations if the initial number was not enough). 
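+
+        A minimal end-to-end sketch (sizes and covariance choices are
+        illustrative only; covariances come from brainiak.matnormal.covs)::
+
+            import numpy as np
+            from brainiak.matnormal.covs import CovIdentity
+
+            X = np.random.randn(100, 4)   # design: TRs by conditions
+            Y = np.random.randn(100, 50)  # data: TRs by voxels
+            model = MatnormRegression(time_noise_cov=CovIdentity(100),
+                                      space_noise_cov=CovIdentity(50))
+            model.fit(X, Y)
+            Y_hat = model.predict(X)      # equals X.dot(model.beta_)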
+ """ + + self.n_c = X.shape[1] + + feed_dict = {self.X: X, self.Y: y} + self.sess.run(tf.global_variables_initializer(), feed_dict=feed_dict) + + # initialize to the least squares solution (basically all + # we need now is the cov) + sigma_inv_x = self.time_noise_cov.Sigma_inv_x(self.X).eval(session=self.sess, + feed_dict=feed_dict) + sigma_inv_y = self.time_noise_cov.Sigma_inv_x(self.Y).eval(session=self.sess, + feed_dict=feed_dict) + + beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) + + self.beta = tf.Variable(beta_init, name="beta") + + self.train_variables = [self.beta] + self.train_variables.extend(self.time_noise_cov.get_optimize_vars()) + self.train_variables.extend(self.space_noise_cov.get_optimize_vars()) + + self.sess.run(tf.variables_initializer([self.beta])) + + optimizer = ScipyOptimizerInterface(-self.logp(), + var_list=self.train_variables, + method=self.optMethod, + options=self.optCtrl) + + optimizer.minimize(session=self.sess, feed_dict=feed_dict) + + self.beta_ = self.beta.eval(session=self.sess) + + def predict(self, X): + """ Predict fMRI signal from design matrix. + + Parameters + ---------- + X : np.array, TRs by conditions. + Design matrix + + """ + + return X.dot(self.beta_) + + def calibrate(self, Y): + """ Decode design matrix from fMRI dataset, based on a previously + trained mapping. This method just does naive MLE: + + .. math:: + X = Y \Sigma_s^{-1}B'(B \Sigma_s^{-1} B')^{-1} + + Parameters + ---------- + Y : np.array, TRs by voxels. + fMRI dataset + """ + + if (Y.shape[1] <= self.n_c): + raise RuntimeError("More conditions than voxels! System is singular,\ + cannot decode.") + + # Sigma_s^{-1} B' + Sigma_s_btrp = self.space_noise_cov.Sigma_inv_x(tf.transpose(self.beta)) + # Y Sigma_s^{-1} B' + Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) + # (B Sigma_s^{-1} B')^{-1} + B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp).eval(session=self.sess) + + X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T + + return X_test diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py new file mode 100644 index 000000000..97b5a1f38 --- /dev/null +++ b/brainiak/matnormal/utils.py @@ -0,0 +1,65 @@ +import functools # https://danijar.com/structuring-your-tensorflow-models/ +import tensorflow as tf + + +def doublewrap(function): + """ + A decorator decorator, allowing to use the decorator to be used without + parentheses if not arguments are provided. All arguments must be optional. + """ + @functools.wraps(function) + def decorator(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): + return function(args[0]) + else: + return lambda wrapee: function(wrapee, *args, **kwargs) + return decorator + + +@doublewrap +def define_scope(function, scope=None, *args, **kwargs): + """ + A decorator for functions that define TensorFlow operations. The wrapped + function will only be executed once. Subsequent calls to it will directly + return the result so that operations are added to the graph only once. + The operations added by the function live within a tf.variable_scope(). If + this decorator is used with arguments, they will be forwarded to the + variable scope. The scope name defaults to the name of the wrapped + function. 
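+
+    A small usage sketch (the class below is hypothetical, for illustration
+    only)::
+
+        import tensorflow as tf
+
+        class Model(object):
+            @define_scope
+            def cost(self):
+                # graph ops are built once, on first access, then cached
+                return tf.reduce_sum(tf.ones([2, 2], dtype=tf.float64))
+
+        m = Model()
+        assert m.cost is m.cost  # the cached tensor is reused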
+ """ + attribute = '_cache_' + function.__name__ + name = scope or function.__name__ + + @property + @functools.wraps(function) + def decorator(self): + if not hasattr(self, attribute): + with tf.variable_scope(name, *args, **kwargs): + setattr(self, attribute, function(self)) + return getattr(self, attribute) + return decorator + + +def xx_t(x): + """ x * x' """ + return tf.matmul(x, x, transpose_b=True) + + +def x_tx(x): + """ x' * x """ + return tf.matmul(x, x, transpose_a=True) + + +def quad_form(x, y): + """ x' * y * x """ + return tf.matmul(x, tf.matmul(y, x), transpose_a=True) + + +def scaled_I(x, size): + """ x * I_{size} """ + return tf.diag(tf.ones([size], dtype=tf.float64) * x) + + +def quad_form_trp(x, y): + """ x * y * x' """ + return tf.matmul(x, tf.matmul(y, x, transpose_b=True)) diff --git a/brainiak/utils/utils.py b/brainiak/utils/utils.py index 33a0a29dd..40adc64b6 100644 --- a/brainiak/utils/utils.py +++ b/brainiak/utils/utils.py @@ -20,6 +20,7 @@ from sklearn.utils import check_random_state from scipy.fftpack import fft, ifft import math +import tensorflow as tf """ @@ -795,3 +796,288 @@ def p_from_null(X, two_sided=False): p = 1 - max_null_ecdf(X[..., 0]) return p + + +def tf_solve_lower_triangular_kron(L, y): + """ Tensor flow function to solve L x = y + where L = kron(L[0], L[1] .. L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + """ + n = len(L) + if n == 1: + return tf.matrix_triangular_solve(L[0], y) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + + for i in range(na): + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + xinb = tf_solve_lower_triangular_kron(L[1:], t) + xina = xina - tf.reshape(tf.tile + (tf.slice(L[0], [i+1, i], [na-i-1, 1]), + [1, nb*col]), [(na-i-1)*nb, col]) * \ + tf.reshape(tf.tile(tf.reshape + (t, [-1, 1]), [na-i-1, 1]), [(na-i-1)*nb, col]) + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + + +def tf_solve_upper_triangular_kron(L, y): + """ Tensor flow function to solve L^T x = y + where L = kron(L[0], L[1] .. 
L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + """ + n = len(L) + if n == 1: + return tf.matrix_triangular_solve(L[0], y, adjoint=True) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + + for i in range(na-1, -1, -1): + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + xinb = tf_solve_upper_triangular_kron(L[1:], t) + xt = xt - tf.reshape(tf.tile(tf.transpose + (tf.slice(L[0], [i, 0], [1, i])), + [1, nb*col]), [i*nb, col]) * \ + tf.reshape(tf.tile(tf.reshape + (t, [-1, 1]), [i, 1]), [i*nb, col]) + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + + +def tf_kron_mult(L, x): + """ Tensorflow multiply with kronecker product matrix + Returs kron(L[0], L[1] ...) * x + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a square matrix of dimension n_i x n_i + + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + Returns + ------- + y : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + """ + n = len(L) + if n == 1: + return tf.matmul(L[0], x) + else: + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + xt = tf_kron_mult(L[1:], tf.transpose(tf.reshape(tf.transpose(x), [-1, nb]))) + y = tf.zeros_like(x) + for i in range(na): + ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) + yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), tf.transpose(tf.slice(L[0], [i,0], [1, na]))), [nb, col]) + y = tf.concat(axis=0, values=[ya, yb, yc]) + return y + + +def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): + """ Tensor flow function to solve L x = y + where L is a lower triangular matrix with a mask + + Arguments + --------- + L : 2-D tensor + Must be a tensorflow tensor and + must be a triangular matrix of dimension n x n + + y : 1-D or 2-D tensor + Dimension n x p + + mask : 1-D tensor + Dimension n x 1, should be 1 if element is valid, 0 if invalid + + lower : boolean (default : True) + True if L is lower triangular, False if upper triangular + + adjoint : boolean (default : False) + True if solving for L^x = y, False if solving for Lx = y + + Returns + ------- + x : 1-D or 2-D tensor + Dimension n x p, values at rows for which mask == 0 are set to zero + + """ + + zero = tf.constant(0, dtype=tf.int32) + mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1,1]), tf.reshape(mask, [1, -1])), zero)) + q = tf.to_int32(tf.sqrt(tf.to_double(tf.shape(mask_mat)[0]))) + L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q,q]) + + maskindex = tf.where(tf.not_equal(mask, zero)) + y_masked = tf.gather_nd(y, maskindex) + + x_s1 = tf.matrix_triangular_solve(L_masked, y_masked, lower=lower, adjoint=adjoint) + x = tf.scatter_nd(maskindex, x_s1, tf.to_int64(tf.shape(y))) + return x + + +def tf_solve_lower_triangular_masked_kron(L, y, mask): + """ Tensor flow function to solve L x = y + where L = kron(L[0], L[1] .. 
L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension [n_0*n_1*..n_(m-1)), p] + + mask: 1-D tensor + Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 for don't care + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p, values at rows for which mask == 0 are set to zero + + """ + n = len(L) + if n == 1: + return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=False) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + zero = tf.constant(0, dtype=tf.int32) + + for i in range(na): + mask_b = tf.slice(mask, [i*nb], [nb]) + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + + if tf.reduce_sum(mask_b) != nb: + xinb = tf_solve_lower_triangular_masked_kron(L[1:], t, mask_b) + t_masked = tf_kron_mult(L[1:], xinb) + + else: #all valid - same as no mask + xinb = tf_solve_lower_triangular_kron(L[1:], t) + t_masked = t + xina = xina - tf.reshape(tf.tile + (tf.slice(L[0], [i+1, i], [na-i-1, 1]), + [1, nb*col]), [(na-i-1)*nb, col]) * \ + tf.reshape(tf.tile(tf.reshape + (t_masked, [-1, 1]), [na-i-1, 1]), [(na-i-1)*nb, col]) + + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + + +def tf_solve_upper_triangular_masked_kron(L, y, mask): + """ Tensor flow function to solve L^T x = y + where L = kron(L[0], L[1] .. L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension [n_0*n_1*..n_(m-1)), p] + + mask: 1-D tensor + Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 for don't care + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p, values at rows for which mask == 0 are set to zero + + """ + n = len(L) + if n == 1: + return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=True) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + zero = tf.constant(0, dtype=tf.int32) + L1_end_tr = [tf.transpose(x) for x in L[1:]] + + for i in range(na-1, -1, -1): + mask_b = tf.slice(mask, [i*nb], [nb]) + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + + if tf.reduce_sum(mask_b) != nb: + xinb = tf_solve_upper_triangular_masked_kron(L[1:], t, mask_b) + t_masked = tf_kron_mult(L1_end_tr, xinb) + else: + xinb = tf_solve_upper_triangular_kron(L[1:], t) + t_masked = t + + xt = xt - tf.reshape(tf.tile(tf.transpose + (tf.slice(L[0], [i, 0], [1, i])), + [1, nb*col]), [i*nb, col]) * \ + tf.reshape(tf.tile(tf.reshape + (t_masked, [-1, 1]), [i, 1]), [i*nb, col]) + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + diff --git a/examples/matnormal/MN-RSA.ipynb b/examples/matnormal/MN-RSA.ipynb new file mode 100644 index 000000000..a464927e5 --- /dev/null +++ b/examples/matnormal/MN-RSA.ipynb @@ -0,0 +1,384 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MN-RSA derivation and 
example\n",
+    "\n",
+    "$$\n",
+    "\\DeclareMathOperator{\\Tr}{Tr}\n",
+    "\\newcommand{\\trp}{{^\\top}} % transpose\n",
+    "\\newcommand{\\trace}{\\text{Trace}} % trace\n",
+    "\\newcommand{\\inv}{^{-1}}\n",
+    "\\newcommand{\\mb}{\\mathbf{b}}\n",
+    "\\newcommand{\\M}{\\mathbf{M}}\n",
+    "\\newcommand{\\G}{\\mathbf{G}}\n",
+    "\\newcommand{\\A}{\\mathbf{A}}\n",
+    "\\newcommand{\\R}{\\mathbf{R}}\n",
+    "\\renewcommand{\\S}{\\mathbf{S}}\n",
+    "\\newcommand{\\B}{\\mathbf{B}}\n",
+    "\\newcommand{\\Q}{\\mathbf{Q}}\n",
+    "\\newcommand{\\mH}{\\mathbf{H}}\n",
+    "\\newcommand{\\U}{\\mathbf{U}}\n",
+    "\\newcommand{\\mL}{\\mathbf{L}}\n",
+    "\\newcommand{\\diag}{\\mathrm{diag}}\n",
+    "\\newcommand{\\etr}{\\mathrm{etr}}\n",
+    "\\renewcommand{\\H}{\\mathbf{H}}\n",
+    "\\newcommand{\\vecop}{\\mathrm{vec}}\n",
+    "\\newcommand{\\I}{\\mathbf{I}}\n",
+    "\\newcommand{\\X}{\\mathbf{X}}\n",
+    "\\newcommand{\\Y}{\\mathbf{Y}}\n",
+    "\\newcommand{\\Z}{\\mathbf{Z}}\n",
+    "\\renewcommand{\\L}{\\mathbf{L}}\n",
+    "$$\n",
+    "\n",
+    "We write the generative model for beta-series RSA. Note that for an indicator-coded design matrix $\\X$ this is exactly equivalent to reshaping your data and directly computing the correlation, but it allows for other features like convolving $\\X$ with an HRF. Here is the model: \n",
+    "\n",
+    "$$\n",
+    "\\Y = \\X\\beta + \\epsilon\n",
+    "$$\n",
+    "\n",
+    "where $\\Y$ is a TRs-by-voxels matrix of fMRI data, $\\X$ is a timepoint-by-feature design matrix that usually identifies conditions in the experiment, $\\beta$ is a feature-by-voxel matrix, and $\\epsilon$ is a matrix of random perturbations (i.e. the noise). In conventional correlation-based RSA $\\epsilon \\sim \\mathcal{N}(0, \\sigma^2 \\I)$, i.e. the distribution of residuals is i.i.d. In Cai et al.'s BRSA $\\epsilon$ has temporal AR(1) noise structure and voxel-specific noise variance. Of research interest is the covariance of $\\beta$ in its row dimension, so we want to estimate as little as possible of anything else. We additionally import from Cai et al.'s BRSA the use of $\\X_0$, an unmodeled latent timecourse projected onto voxels by $\\beta_0$, as a way of capturing additional residual structure. \n",
+    "\n",
+    "The above model can be written as follows: \n",
+    "\n",
+    "$$\n",
+    "\\Y\\mid\\beta,\\X_0,\\beta_0,\\Sigma_t,\\sigma_s \\sim\\mathcal{MN}(\\X\\beta+\\X_0\\beta_0, \\Sigma_t, \\sigma_s\\trp\\mathbf{I}),\n",
+    "$$\n",
+    "\n",
+    "where $\\Sigma_t$ is a covariance matrix for the AR(1) covariance ($\\A\\inv$ in the BRSA paper), and $\\sigma_s$ is a spatial noise scaler that allows each voxel to have its own noise. This is not as general as voxel-specific AR coefficients, but it has far fewer parameters and will allow us to tractably handle more complex temporal covariances. This tradeoff will behave differently in different datasets. \n",
+    "\n",
+    "Now we add a matrix-normal prior on $\\beta$, allowing us to marginalize. We parameterize the covariance in terms of its cholesky factor $\\L$. \n",
+    "\n",
+    "$$\n",
+    "\\beta\\sim\\mathcal{MN}(0,\\L\\L\\trp, \\sigma_s\\trp\\I)\\\\\n",
+    "\\Y\\mid\\X_0,\\beta_0,\\Sigma_t,\\sigma_s \\sim\\mathcal{MN}(\\X_0\\beta_0, \\Sigma_t + \\X\\L\\L\\trp\\X\\trp , \\vec{\\sigma_s}\\trp\\mathbf{I})\\\\\n",
+    "$$\n",
+    "\n",
+    "Using the same identity, we can marginalize over $\\beta_0$. \n",
+    "\n",
+    "$$\n",
+    "\\beta_0\\sim\\mathcal{MN}(0,\\I, \\sigma_s\\I)\\\\\n",
+    "\\Y\\mid\\X_0,\\Sigma_t,\\sigma_s \\sim\\mathcal{MN}(0, \\Sigma_t + \\X\\L\\L\\trp\\X\\trp + \\X_0\\X_0\\trp , \\sigma_s\\mathbf{I})\n",
+    "$$\n",
+    "\n",
+    "Now, the temporal covariance is the sum of an autoregressive term, a low-rank noise term, and our term of interest. \n",
+    "\n",
+    "Next, we apply some computational tricks. Consider the matrix normal (log) density: \n",
+    "\n",
+    "$$\n",
+    "P(X; M, U, V) = \\frac{\\exp\\left(-\\frac12\\Tr\\left[V\\inv(X-M)\\trp U\\inv(X-M)\\right]\\right)}{(2\\pi)^{np/2}|U|^{p/2}|V|^{n/2}}\\\\\n",
+    "2 \\log P(X; M, U, V) = -\\Tr\\left[V\\inv(X-M)\\trp U\\inv(X-M)\\right]-np\\log 2\\pi-p\\log|U|-n\\log|V|\n",
+    "$$\n",
+    "\n",
+    "Here $n$ and $p$ are the row and column dimensions of $M$. Note that both the determinant and the inverse are $O(n^3)$ and $O(p^3)$ for the two covariances. Furthermore, computing the determinant and then taking its log is numerically unstable. So instead we can take the cholesky decomposition of both covariances, at which point the log-determinant is just 2 times the sum of the logs of the diagonal elements of the cholesky factor. Then we recognize that the term inside the trace can be computed by our favorite triangular matrix solver using the cholesky we already paid for. Let $A = V, B = (X-M)\\trp$. Then a solver for $X$ in $AX=B$ will give us exactly $V^{-1}(X-M)\\trp$. We play the same exact trick for $A=U, B=(X-M)$ (though of course we center the brain first, so $M=0$). \n",
+    "\n",
+    "Cai et al. additionally apply the matrix inversion lemma twice so that they invert something feature-by-feature instead of time-by-time. Doing this naively will not help us in this version, because we are still stuck with doing the determinant (which is cubic in time). Here is the expression: \n",
+    "\n",
+    "$$\n",
+    "\\begin{aligned}\n",
+    "\\Sigma_Y :=& \\Sigma_t + \\X\\L\\L\\trp\\X\\trp + \\X_0\\X_0\\trp \\\\\n",
+    "\\mbox{let } \\Z :=& \\Sigma_t + \\X\\L\\L\\trp\\X\\trp\\\\\n",
+    "\\Sigma_Y\\inv =& (\\Z + \\X_0\\X_0\\trp)\\inv \\\\\n",
+    "=& \\Z\\inv - \\Z\\inv\\X_0(\\I + \\X_0\\trp\\Z\\inv\\X_0)\\inv\\X_0\\trp\\Z\\inv\\\\\n",
+    "\\Z\\inv =& \\Sigma_t\\inv - \\Sigma_t\\inv \\X\\L(\\I+\\L\\trp\\X\\trp\\Sigma_t\\inv\\X\\L)\\inv\\L\\trp\\X\\trp \\Sigma_t\\inv\\\\\n",
+    "\\end{aligned}\n",
+    "$$\n",
+    "\n",
+    "That said, if the inverse and determinant of $\\Sigma_t$ are trivial (as in the case of AR(1), but not generally), we can apply the matrix determinant lemma:\n",
+    "\n",
+    "$$\n",
+    "\\begin{aligned}\n",
+    "\\Sigma_Y :=& \\Sigma_t + \\X\\L\\L\\trp\\X\\trp + \\X_0\\X_0\\trp \\\\\n",
+    "\\mbox{let } \\Z :=& \\Sigma_t + \\X\\L\\L\\trp\\X\\trp\\\\\n",
+    "|\\Sigma_Y| =& |\\Z + \\X_0\\X_0\\trp| \\\\\n",
+    "=& |\\Z|\\times|\\I + \\X_0\\trp\\Z\\inv\\X_0|\\\\\n",
+    "\\Z\\inv =& \\Sigma_t\\inv - \\Sigma_t\\inv \\X\\L(\\I+\\L\\trp\\X\\trp\\Sigma_t\\inv\\X\\L)\\inv\\L\\trp\\X\\trp \\Sigma_t\\inv\\\\\n",
+    "|\\Z| =& |\\Sigma_t| \\times|\\I+\\L\\trp\\X\\trp\\Sigma_t\\inv\\X\\L|\\\\\n",
+    "=& \\frac{|\\I+\\L\\trp\\X\\trp\\Sigma_t\\inv\\X\\L|}{|\\Sigma_t\\inv|} \\\\\n",
+    "\\end{aligned}\n",
+    "$$\n",
+    "\n",
+    "Now we notice that we can still apply our cholesky-inverse-solve trick, because the term in the inverse and determinant with the lemmas applied is identical. As long as the inverse and determinant of the temporal noise covariance are computable in better than cubic time, this is useful to do. Currently the lemma trick is not being done in the code (but the cholesky trick is). 
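To make the cholesky trick above concrete, here is a minimal stand-alone numpy sketch of the zero-mean matrix-normal log-density computed this way. It is illustrative only: the function name, helper choices, and test values are ours, and it is not the TensorFlow implementation used by the package.

import numpy as np
from scipy.linalg import cho_factor, cho_solve
from scipy.stats import multivariate_normal

def matnorm_logp_chol(X, row_cov, col_cov):
    """Zero-mean matrix-normal log-density via Cholesky factors.

    X is n x p, row_cov is n x n, col_cov is p x p.
    """
    n, p = X.shape
    row_chol = cho_factor(row_cov, lower=True)
    col_chol = cho_factor(col_cov, lower=True)
    # log-determinant = 2 * sum of the logs of the Cholesky diagonal
    logdet_row = 2 * np.sum(np.log(np.diag(row_chol[0])))
    logdet_col = 2 * np.sum(np.log(np.diag(col_chol[0])))
    # solves reuse the factors instead of forming explicit inverses
    rinv_X = cho_solve(row_chol, X)      # row_cov^{-1} X,   n x p
    cinv_Xt = cho_solve(col_chol, X.T)   # col_cov^{-1} X^T, p x n
    trace_term = np.trace(cinv_Xt @ rinv_X)
    return -0.5 * (trace_term + n * p * np.log(2 * np.pi)
                   + p * logdet_row + n * logdet_col)

# quick check against the vectorized (kronecker) form of the same density
n, p = 6, 4
rng = np.random.RandomState(0)
row_cov = np.cov(rng.randn(n, 3 * n))
col_cov = np.cov(rng.randn(p, 3 * p))
X = rng.randn(n, p)
direct = multivariate_normal.logpdf(X.flatten("F"), np.zeros(n * p),
                                    np.kron(col_cov, row_cov))
assert np.isclose(matnorm_logp_chol(X, row_cov, col_cov), direct)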
\n", + "\n", + "Now here is an example: " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADJ9JREFUeJzt3WusZWV9x/HvzxkEBzFAAS8MKWAI\nraEWyKRBbWwj0iIS8EVfoKWZVpN50YtoNAilqWnSNE00XpI2GgIoqYgvECshXpigxjSthGEcrkOF\nAoWB0aExVYIWZsK/L/aaJ4fJjDOz11p7nTPz/SQnZ1/W3v//PnPOb5619nr2k6pCkgBeMXUDkpYP\nA0FSYyBIagwESY2BIKkxECQ1kwdCkguT/GeSR5NcNXKtU5J8N8nWJA8muWLMekvqrkrywyS3L6DW\nsUluSfJw9zrfMnK9D3c/yweS3JzkqIGf/4YkO5I8sOS245NsTPJI9/24ket9ovt53pfka0mOHbPe\nkvs+mqSSnDBUvf2ZNBCSrAL+GXgX8CbgvUneNGLJXcBHquo3gfOAvxi53m5XAFsXUAfgs8C3quo3\ngN8es26Sk4EPAuuq6ixgFXDZwGW+CFy4x21XAXdW1RnAnd31MettBM6qqjcDPwKuHrkeSU4BLgCe\nHLDWfk09Qvgd4NGqeqyqXgS+Alw6VrGq2l5Vm7vLzzH7Yzl5rHoASdYC7wauG7NOV+s1wNuB6wGq\n6sWq+t+Ry64GXpVkNbAGeGbIJ6+q7wM/3ePmS4Ebu8s3Au8Zs15V3VFVu7qrPwDWjlmv82ngSmCh\nZw5OHQgnA08tub6Nkf9Ad0tyKnAOcNfIpT7D7B/2pZHrAJwOPAt8odtFuS7J0WMVq6qngU8y+19s\nO/CzqrpjrHpLvLaqtnc9bAdOWkDN3d4PfHPMAkkuAZ6uqnvHrLM3UwdC9nLb6ImY5NXAV4EPVdXP\nR6xzMbCjqu4Zq8YeVgPnAp+rqnOA5xl2OP0y3b77pcBpwBuAo5NcPla9qSW5htlu500j1lgDXAP8\n7Vg1fpWpA2EbcMqS62sZeMi5pyRHMAuDm6rq1jFrAW8DLknyBLPdoXck+dKI9bYB26pq96jnFmYB\nMZZ3Ao9X1bNVtRO4FXjriPV2+0mS1wN033eMXTDJeuBi4I9r3AlAb2QWsPd2vzdrgc1JXjdizWbq\nQLgbOCPJaUleyeyA1G1jFUsSZvvXW6vqU2PV2a2qrq6qtVV1KrPX9p2qGu1/0Kr6MfBUkjO7m84H\nHhqrHrNdhfOSrOl+tuezmIOntwHru8vrga+PWSzJhcDHgEuq6hdj1qqq+6vqpKo6tfu92Qac2/3b\njq+qJv0CLmJ25Pa/gGtGrvW7zHZJ7gO2dF8XLeh1/j5w+wLqnA1s6l7jvwLHjVzv74CHgQeAfwGO\nHPj5b2Z2fGInsz+ODwC/xuzdhUe678ePXO9RZse6dv/OfH7Menvc/wRwwti/N7u/0hWVpMl3GSQt\nIwaCpMZAkNQYCJIaA0FSs2wCIckG61lvudU6HOottWwCAVj0D8F6K7feofzapqjXLKdAkDSxhZ6Y\ntOqYo2v1iXv/bImXnnueVxyz94l5Rz7+y8F72ckLHMGRgz+v9Q6tWodKvf/jeV6sF/Y2mfBlVg9a\ndX/FTjyWtf/w5wf9uNPft2WEbqTDx1115wFt5y6DpKZXICzy8xAljW/uQJjg8xAljazPCGGhn4co\naXx9AmGyz0OUNI4+gXBAn4eYZEOSTUk2vfTc8z3KSRpbn0A4oM9DrKprq2pdVa3b13kGkpaHPoGw\n0M9DlDS+uU9MqqpdSf4S+DazFXtuqKoHB+tM0sL1OlOxqr4BfGOgXiRNzDMVJTULnctw5OO/nGte\nwmNfPnuues6BkA6OIwRJjYEgqTEQJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNgSCpMRAkNQaCpMZA\nkNQsdLbjvOadtegsSengOEKQ1BgIkhoDQVLTZym3U5J8N8nWJA8muWLIxiQtXp+DiruAj1TV5iTH\nAPck2VhVDw3Um6QFm3uEUFXbq2pzd/k5YCsu5SataIMcQ0hyKnAOcNcQzydpGr3PQ0jyauCrwIeq\n6ud7uX8DsAHgKNb0LSdpRL1GCEmOYBYGN1XVrXvbZunajkdwZJ9ykkbW512GANcDW6vqU8O1JGkq\nfUYIbwP+BHhHki3d10UD9SVpAn0We/03IAP2ImlinqkoqVkRsx3n5SxJ6eA4QpDUGAiSGgNBUmMg\nSGoMBEmNgSCpMRAkNQaCpMZAkNQYCJIaA0FSYyBIagwESc0hPdtxXs6S1OHKEYKkxkCQ1BgIkpre\ngZBkVZIfJrl9iIYkTWeIEcIVzJZxk7TC9V2oZS3wbuC6YdqRNKW+I4TPAFcCLw3Qi6SJ9Vm56WJg\nR1Xds5/tNiTZlGTTTl6Yt5ykBei7ctMlSZ4AvsJsBacv7bmRaztKK8fcgVBVV1fV2qo6FbgM+E5V\nXT5YZ5IWzvMQJDWDzGWoqu8B3xviuSRNxxGCpMbZjgNylqRWOkcIkhoDQVJjIEhqDARJjYEgqTEQ\nJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNgSCpcbbjMuAsSS0XjhAkNQaCpMZAkNT0Xbnp2CS3JHk4\nydYkbxmqMUmL1/eg4meBb1XVHyV5JbBmgJ4kTWTuQEjyGuDtwJ8CVNWLwIvDtCVpCn12GU4HngW+\n0C0Hf12SowfqS9IE+gTCauBc4HNVdQ7wPHDVnhu5tqO0cvQJhG3Atqq6q7t+C7OAeBnXdpRWjj5r\nO/4YeCrJmd1N5wMPDdKVpEn0fZfhr4CbuncYHgP+rH9LkqbSKxCqaguwbqBeJE3MMxUlNc52XMGc\nJamhOUKQ1BgIkhoDQVJjIEhqDARJjYEgqTEQJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNsx0PQ/PO\nWvz2M/M97m92/NZcj1u0u89eNXULk3OEIKkxECQ1BoKkpu/ajh9O8mCSB5LcnOSooRqTtHhzB0KS\nk4EPAuuq6ixgFXDZUI1JWry+uwyrgVclWc1soddn+rckaSp9Fmp5Gvgk8CSwHfhZVd0xVGOSFq/P\nLsNxwKXAacAbgKOTXL6X7VzbUVoh+uwyvBN4vKqeraqdwK3AW/fcyLUdpZWjT
yA8CZyXZE2SMFvb\nceswbUmaQp9jCHcxW/F5M3B/91zXDtSXpAn0Xdvx48DHB+pF0sQ8U1FS42xHHbB5Zy3+/Un3L7Se\n5ucIQVJjIEhqDARJjYEgqTEQJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNgSCpMRAkNc521OicJbly\nOEKQ1BgIkhoDQVKz30BIckOSHUkeWHLb8Uk2Jnmk+37cuG1KWoQDGSF8Ebhwj9uuAu6sqjOAO7vr\nkla4/QZCVX0f+OkeN18K3NhdvhF4z8B9SZrAvMcQXltV2wG67ycN15KkqYx+HkKSDcAGgKNYM3Y5\nST3MO0L4SZLXA3Tfd+xrQ9d2lFaOeQPhNmB9d3k98PVh2pE0pQN52/Fm4D+AM5NsS/IB4B+BC5I8\nAlzQXZe0wu33GEJVvXcfd50/cC+SJuaZipIaZztq2Vr0LMk/5Oy5HncocYQgqTEQJDUGgqTGQJDU\nGAiSGgNBUmMgSGoMBEmNgSCpMRAkNQaCpMZAkNQYCJIaZzvqgN199qqpWzgg885afOzL8z3u9Pdt\nmetxy5EjBEmNgSCpMRAkNfOu7fiJJA8nuS/J15IcO26bkhZh3rUdNwJnVdWbgR8BVw/cl6QJzLW2\nY1XdUVW7uqs/ANaO0JukBRviGML7gW/u684kG5JsSrJpJy8MUE7SWHoFQpJrgF3ATfvaxqXcpJVj\n7hOTkqwHLgbOr6oariVJU5krEJJcCHwM+L2q+sWwLUmayrxrO/4TcAywMcmWJJ8fuU9JCzDv2o7X\nj9CLpIl5pqKkxtmOUmfeWYuH0ixJRwiSGgNBUmMgSGoMBEmNgSCpMRAkNQaCpMZAkNQYCJIaA0FS\nYyBIagwESY2BIKlxtqPU06E0S9IRgqTGQJDUzLWU25L7PpqkkpwwTnuSFmnepdxIcgpwAfDkwD1J\nmshcS7l1Pg1cCbgmg3SImOsYQpJLgKer6t6B+5E0oYN+2zHJGuAa4A8OcPsNwAaAo1hzsOUkLdA8\nI4Q3AqcB9yZ5gtnKz5uTvG5vG7u2o7RyHPQIoaruB07afb0LhXVV9T8D9iVpAvMu5SbpEDTvUm5L\n7z91sG4kTcozFSU1BoKkxtmO0kQWOUvyhb/+9wPazhGCpMZAkNQYCJIaA0FSYyBIagwESY2BIKkx\nECQ1BoKkxkCQ1BgIkhoDQVJjIEhqUrW4T1FP8izw3/u4+wRgkR/DZr2VW+9Qfm1j1fv1qjpxfxst\nNBB+lSSbqmqd9ay3nGodDvWWcpdBUmMgSGqWUyBcaz3rLcNah0O9ZtkcQ5A0veU0QpA0MQNBUmMg\nSGoMBEmNgSCp+X8Kn/zycjAIDgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import scipy\n", + "from scipy.stats import norm\n", + "from scipy.special import expit as inv_logit\n", + "import numpy as np\n", + "from numpy.linalg import cholesky\n", + "import matplotlib.pyplot as plt\n", + "\n", + "def rmn(rowcov, colcov):\n", + " # generate random draws from a zero-mean matrix-normal distribution\n", + " Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0])))\n", + " return(cholesky(rowcov).dot(Z).dot(cholesky(colcov)))\n", + "\n", + "\n", + "def make_ar1_with_lowrank_covmat(size, rank):\n", + " \"\"\" Generate a random covariance that is AR1 with added low rank structure\n", + " \"\"\"\n", + " sigma = np.abs(norm.rvs())\n", + " rho = np.random.uniform(-1,0)\n", + " offdiag_template = scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size-2)])\n", + " diag_template = np.diag(np.r_[0,np.ones(size-2),0])\n", + " I = np.eye(size)\n", + "\n", + " prec_matrix = (I - rho * offdiag_template + rho**2 * diag_template) / (sigma**2)\n", + " lowrank_matrix = norm.rvs(size=(size, rank))\n", + " return np.linalg.inv(prec_matrix) + lowrank_matrix.dot(lowrank_matrix.T)\n", + "\n", + "\n", + "def gen_data(n_T, n_V, space_cov, time_cov):\n", + "\n", + " n_C = 16\n", + " U = np.zeros([n_C, n_C])\n", + " U = np.eye(n_C) * 0.6\n", + " U[8:12, 8:12] = 0.8\n", + " for cond in range(8, 12):\n", + " U[cond,cond] = 1\n", + "\n", + " beta = rmn(U, space_cov)\n", + "\n", + " X = rmn(np.eye(n_T), np.eye(n_C))\n", + "\n", + " Y_hat = X.dot(beta)\n", + "\n", + " Y = Y_hat + rmn(time_cov, space_cov)\n", + "\n", + " return beta, X, Y, U\n", + "\n", + "n_T = 100\n", + "n_V = 80\n", + "n_C = 16\n", + "\n", + "spacecov_true = np.diag(np.abs(norm.rvs(size=(n_V))))\n", + "timecov_true = make_ar1_with_lowrank_covmat(n_T, rank=7)\n", + "\n", + "true_beta, true_X, true_Y, true_U = gen_data(n_T, n_V, spacecov_true, timecov_true)\n", + "\n", + "%matplotlib inline\n", + "plt.matshow(true_U)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That is the target matrix. 
Now we noisify it using a simple synthetic brain data generator, and recover it with MN-RSA. We intentionally code up MN-RSA here from the building blocks the toolkit provides so we can illustrate how easy it is to build new models: " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Optimization terminated with:\n", + " Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'\n", + " Objective function value: 20082.401254\n", + " Number of iterations: 144\n", + " Number of functions evaluations: 163\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEgdJREFUeJzt3XuMnOV1x/Hf8ezFu/b6bjDYDmsc\nZBoMrR0XEUAEcGhcQICiqiIKkdPQgpo2AZKUgKwWRVWlSkSBqC2kFAgoIFBFSIKAABaBoiZAA8bG\nNnaDIQZf8YXaxnvx7uye/jHjp8t27d05M+87Y/h+pNVeZs6eZ2dnf/vO5Zlj7i4AkKRx9V4AgMZB\nIABICAQACYEAICEQACQEAoCk7oFgZsvM7L/NbJOZ3ZRxr7lm9pyZbTCz9WZ2XZb9hvQtmNlrZvZ4\nDr2mmNkjZrax/HN+JuN+N5Qvy3Vm9pCZja/x97/XzHaZ2bohX5tmZivN7M3y+6kZ97u1fHm+bmY/\nNbMpWfYbctq3zczNbEat+o2mroFgZgVJ/yLpjyV9StIXzexTGbYsSvqWu/+epLMk/VXG/Q67TtKG\nHPpI0g8kPeXup0r6/Sz7mtlsSd+QtMTdF0oqSLqyxm3uk7Rs2NdukvSsu58i6dny51n2Wylpobuf\nIem3km7OuJ/MbK6kiyS9W8Neo6r3EcKZkja5+9vu3ifpYUmXZ9XM3Xe4+6ryxx+o9McyO6t+kmRm\ncyRdIunuLPuUe02SdJ6keyTJ3fvcfV/GbZsktZlZk6R2Sdtr+c3d/QVJ7w/78uWS7i9/fL+kK7Ls\n5+7PuHux/OlLkuZk2a/sNkk3Ssr1mYP1DoTZkrYM+XyrMv4DPczMOiUtkvRyxq1uV+kXO5hxH0k6\nWdJuST8q30S528wmZNXM3bdJ+p5K/8V2SNrv7s9k1W+I4919R3kNOyQdl0PPw74q6RdZNjCzyyRt\nc/c1WfYZSb0DwUb4WuaJaGYTJf1E0vXufiDDPpdK2uXur2bVY5gmSYsl3enuiyR1qbaH0x9Svu1+\nuaR5kk6UNMHMrsqqX72Z2QqVbnY+mGGPdkkrJP1dVj2Opt6BsFXS3CGfz1GNDzmHM7NmlcLgQXd/\nNMteks6RdJmZbVbp5tCFZvZAhv22Strq7oePeh5RKSCy8jlJv3P33e7eL+lRSWdn2O+w98zsBEkq\nv9+VdUMzWy7pUklf8mw3AM1XKWDXlK83cyStMrNZGfZM6h0Iv5F0ipnNM7MWle6QeiyrZmZmKt2+\n3uDu38+qz2HufrO7z3H3TpV+tl+6e2b/Qd19p6QtZrag/KWlkt7Iqp9KNxXOMrP28mW7VPncefqY\npOXlj5dL+nmWzcxsmaTvSLrM3buz7OXua939OHfvLF9vtkpaXP7dZs/d6/om6WKV7rl9S9KKjHud\nq9JNktclrS6/XZzTz3m+pMdz6PMHkl4p/4w/kzQ1437flbRR0jpJP5bUWuPv/5BK90/0q/THcbWk\n6So9uvBm+f20jPttUum+rsPXmR9m2W/Y6Zslzcj6enP4zcpNAaDuNxkANBACAUBCIABICAQACYEA\nIGmYQDCza+hHv0br9XHoN1TDBIKkvC8E+h27/T7KP1s9+iWNFAgA6izXJybNmFbwzrnNI562e++A\nZk4vjHja+vdmhvqNKx75tGJvl5rGj7wRcLAp1G7krVqH+3V3qal95H4+8o9dlYGuLhUmjNyvqSf2\nPQvdR75A+wa61VJoH/nEweBGz8LIF0xfsVstTUfoJUnB63R/x8i/+GJPl5rajrxptNAX6+fjRr7C\n9B/qUnPrUfod7Ku4V8/AAfUN9BzlGloSveqHdM5t1n89PXf0Mw5z+m1fC/Vr2x37RfXMGPVyG9Hg\nyFk3qv6O2DotmOXT18YKp6zZG6qz7t5Q3eCUibF+vf2hup0XxP7xdGwdCNUV22MH6FN+vWX0Mw3z\n650Pjel83GQAkFQVCHm+HiKA7IUDoQ6vhwggY9UcIeT6eogAsldNINTt9RABZKOaQBjT6yGa2TVm\n9oqZvbJ7b+zeWAD5qCYQxvR6iO5+l7svcfclR3qeAYDGUE0g5Pp6iACyF35ikrsXzeyvJT2t0sSe\ne919fc1WBiB3VT1T0d2flPRkjdYCoM54piKAJNe9DOvfmxnal7D2hjtC/T57bWwXaev+2F6Gvo5Y\nvvYHh60VYlsE1N8W+/k2fyH2XP9pG2OPLu05I3Yn9ORNsb0aA8HLZf/JsT+jcf2xdbaeWvnMlsF9\nY9towxECgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABIct3tOK4Y\nm6YU3bX4H/96V6ju81d8OVQ3ac8HobreedNDdXtPaw3VNX8Q22XXtidW1zst9n/Hgi/BuWdRbJ2f\n/OaLobqB8xeH6rafMz5UN37HwYprxvWP7cLkCAFAQiAASAgEAEk1o9zmmtlzZrbBzNab2XW1XBiA\n/FVzp2JR0rfcfZWZdUh61cxWuvsbNVobgJyFjxDcfYe7ryp//IGkDWKUG3BMq8l9CGbWKWmRpJdr\n8f0A1EfVgWBmEyX9RNL17n5ghNPTbMdib1e17QBkqKpAMLNmlcLgQXd/dKTzDJ3t2DQ++HrjAHJR\nzaMMJukeSRvc/fu1Wx
KAeqnmCOEcSV+WdKGZrS6/XVyjdQGog2qGvf6npNioGwANiWcqAkhy3e04\n2CT1zKj8oCI6azG6a/Hpn/04VHf67ZXPrZSkCdsHQ3WFQ8Hdh9Njl2dxYqhMA62xdTb/v8esxqZ1\nb3B35adPC9U1/WpdqK5zdVuozjsDT/exsf3OOUIAkBAIABICAUBCIABICAQACYEAICEQACQEAoCE\nQACQEAgAEgIBQEIgAEgIBABJrrsdZdJgc+VlfR2x3IrOWozuWlx7/R2huj9c8ZehutZ9sV2EHvw3\nMPP1/mC/2O7Kpq5iqM4GY5fLtgsmh+qOn7QwVLfn9NhsxylvVf57GHx7bL90jhAAJAQCgIRAAJDU\nYi5DwcxeM7PHa7EgAPVTiyOE61Qa4wbgGFftoJY5ki6RdHdtlgOgnqo9Qrhd0o2SYq8SCqChVDO5\n6VJJu9z91VHO93+zHbuZ7Qg0smonN11mZpslPazSBKcHhp/pQ7Md25ntCDSycCC4+83uPsfdOyVd\nKemX7n5VzVYGIHc8DwFAUpO9DO7+vKTna/G9ANQPRwgAklx3O3pB6u+ofCdaf/C+yN5500N10VmL\n0V2Lv/mHO0N1F/3pV0J1k96K7SI8MD823LG5K3Z5HuiM7Qbs2HooVDd580CoruuEllDdhJ2N92g9\nRwgAEgIBQEIgAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACDJd7ajJAuM3Sv0\nxnrtPa01VFc4FJsNGJ21GN21uPLf7wvVLf772K7M4146EKobmBjbDTjYHJsJGZ3t2DM99v9x1hPv\nhuo2XfuJUN38hyv/PYw7NLadnBwhAEgIBAAJgQAgqXZy0xQze8TMNprZBjP7TK0WBiB/1d6p+ANJ\nT7n7n5hZi6T2GqwJQJ2EA8HMJkk6T9JXJMnd+yT11WZZAOqhmpsMJ0vaLelH5XHwd5sZo5mAY1g1\ngdAkabGkO919kaQuSTcNP9PQ2Y4DXcx2BBpZNYGwVdJWd3+5/PkjKgXEhwyd7ViYwAEE0Miqme24\nU9IWM1tQ/tJSSW/UZFUA6qLaRxm+LunB8iMMb0v6s+qXBKBeqgoEd18taUmN1gKgznimIoAk192O\nTT3S9LWB2Y5tsV1vzR/Edr31To/182C8RmctRnctrvrb2CzJ8752Taiu/Z3Yo0v9J8V2qw42x+qa\nu4LXlwWzQnUz18RmO3bNm1xxzeCWwpjOxxECgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKA\nhEAAkBAIABICAUBCIABIct3tWOguasqavRXXbf7CzFC/tj2x3WvFiaEyzXy9P1R3YH6sYXTWYnTX\n4gt33BWqu2X3aaG6qO6B2CzJ9ed3xPqdu2D0M41g/N7Y9aVnZuU/n48b2w5ejhAAJAQCgIRAAJBU\nO9vxBjNbb2brzOwhMxtfq4UByF84EMxstqRvSFri7gslFSRdWauFAchftTcZmiS1mVmTSoNet1e/\nJAD1Us2glm2SvifpXUk7JO1392dqtTAA+avmJsNUSZdLmifpREkTzOyqEc6XZjv2DXTHVwogc9Xc\nZPicpN+5+25375f0qKSzh59p6GzHlkJ7Fe0AZK2aQHhX0llm1m5mptJsxw21WRaAeqjmPoSXVZr4\nvErS2vL3ij23FUBDqHa24y2SbqnRWgDUGc9UBJDkuttRg4Oy7t6Ky6ZtHAi1650Wy7uB1tguybHu\nKBuuuSs2429gYmxXX3TWYnTX4ndnrg/V/dP/nBSqmzk+tgt0/awLQ3Ut+/pCdTYYu54dPLHyJwQP\nNI/tfBwhAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAJN/djoWC\nBqdUPsdwzxmFUDuLbZJUc2yznJq6iqG6A52xcRaDzbHdlf0ntYbqoqK7Fr8+9Z1Q3a3vzw/VWVdP\nqO7AmbHZo637Y7tcW/ZXvkty3Bj/FjhCAJAQCAASAgFAMmogmNm9ZrbLzNYN+do0M1tpZm+W30/N\ndpkA8jCWI4T7JC0b9rWbJD3r7qdIerb8OYBj3KiB4O4vSHp/2Jcvl3R/+eP7JV1R43UBqIPofQjH\nu/sOSSq/P652SwJQL5nfqfih2Y5FZjsCjSwaCO+Z2QmSVH6/60hn/NBsxyZmOwKNLBoIj0laXv54\nuaSf12Y5AOppLA87PiTpRUkLzGyrmV0t6R8lXWRmb0q6qPw5gGPcqHsZ3P2LRzhpaY3XAqDOeKYi\ngCTf3Y7ust7+issmb4rNwNuzKFbXujeWk9FZfR1bD+Xab7A5ttuxeyA2SzI6azG6a/Fvpr0Vqnuu\nGOs3YXvl12lJUmyzqortlf/Z+hiv0hwhAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgE\nAAmBACAhEAAkBAKAJNfdjv0dTdp5QeVz8AbaYtvCPvnNF0N19unTQnXbLpgcqpu8OTaEsmd6LM+b\nu2K7JNef3xGrm3VhqC46azG6a/HJ154J1Z3/538Rqjs4O/bn1zu98prBMbbiCAFAQiAASAgEAEl0\ntuOtZrbRzF43s5+a2ZRslwkgD9HZjislLXT3MyT9VtLNNV4XgDoIzXZ092fcvVj+9CVJczJYG4Cc\n1eI+hK9K+sWRThw6yq3Y01WDdgCyUlUgmNkKSUVJDx7pPENHuTW1TaimHYCMhZ+YZGbLJV0qaam7\nx57pAqChhALBzJZJ+o6kz7o7I52Bj4jobMd/ltQhaaWZrTazH2a8TgA5iM52vCeDtQCoM56pCCDJ\ndbdjoc/VsbXynX37T44tc+D8xaG6pl+tG/1MIzh+0sJQXdcJsZmJs554N1TXu2BWqK773AWhupZ9\nfaG6A2dWvjNWis9ajO5afP7ufwvVLbvkS6G6tnkTK67ZNsaNoxwhAEgIBAAJgQAgIRAAJAQCgIRA\nAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAJNfdjj7OVGyvPIPG9cdeoW37OeNDdZ2r20J1e06P\n9ZuwczBUt+naT4TqZq6J9Ru/N7aL0AZjv7/W/bF1KjYKNDxrMbpr8aknjvhSpEe19KqrK66xgbH9\nDjhCAJAQCACS0Ci3Iad928zczGZkszwAeYqOcpOZzZV0kaTYy/YAaDihUW5lt0m6URIzGYCPiNB9\nCGZ2maRt7r6mxusBUEcVP85iZu2SVkj6ozGe/xpJ10hSS/vUStsByFHkCGG+pHmS1pjZZpUmP68y\nsxFfynfobMfmVmY7Ao2s4iMEd18r6bjDn5dDYYm776nhugDUQXSUG4CPoOgot6Gnd9ZsNQDqimcq\nAkgIBACJuef3vKLJLcf72bOOegtkRD2nxmYRjt9xMFTnzYVQXc/sfB9Fad98IFTXNW9yqC6yU1WS\nDp4Yq2vZH7tuFttj2x17p4fKNGNd5fNKJallXzFU9+wDlQ9fP/PzW/TKmt5RLxi
OEAAkBAKAhEAA\nkBAIABICAUBCIABICAQACYEAICEQACQEAoCEQACQEAgAEgIBQJLrbkcz2y3pnSOcPENSni/DRr9j\nt99H+WfLqt9J7j5ztDPlGghHY2avuPsS+tGvkXp9HPoNxU0GAAmBACBppEC4i370a8BeH4d+ScPc\nhwCg/hrpCAFAnREIABICAUBCIABICAQAyf8CmWtrlo7vWjkAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import tensorflow as tf\n", + "from brainiak.matnormal.covs import (CovDiagonal, CovAR1,\n", + " CovUnconstrainedInvCholesky)\n", + "from brainiak.utils.utils import cov2corr\n", + "from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row\n", + "from tensorflow.contrib.opt import ScipyOptimizerInterface\n", + "\n", + "space_cov = CovDiagonal(size=n_V)\n", + "time_cov = CovAR1(size=n_T)\n", + "rsa_cov = CovUnconstrainedInvCholesky(size=n_C)\n", + "\n", + "# inputs into TF\n", + "X = tf.constant(true_X)\n", + "Y = tf.constant(true_Y)\n", + "\n", + "params = rsa_cov.get_optimize_vars() +\\\n", + " time_cov.get_optimize_vars() +\\\n", + " space_cov.get_optimize_vars()\n", + "\n", + "# tf session\n", + "sess = tf.Session()\n", + "\n", + "# initialize\n", + "sess.run(tf.global_variables_initializer())\n", + "\n", + "# construct loss (marginal likelihood constructed automatically)\n", + "loss = -(time_cov.logp +\n", + " space_cov.logp +\n", + " rsa_cov.logp +\n", + " matnorm_logp_marginal_row(Y,\n", + " row_cov=time_cov,\n", + " col_cov=space_cov,\n", + " marg=X, marg_cov=rsa_cov))\n", + "\n", + "# tie into scipy's optimizers\n", + "optimizer = ScipyOptimizerInterface(loss,\n", + " var_list=params,\n", + " method=\"L-BFGS-B\")\n", + "\n", + "\n", + "optimizer.minimize(sess)\n", + "\n", + "U = rsa_cov.Sigma.eval(session=sess)\n", + "C = cov2corr(U)\n", + "plt.matshow(C)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In practice, MN-RSA is actually implemented in `brainiak.matnormal`, including the nuisance regressor estimation of Cai et al. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Optimization terminated with:\n", + " Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'\n", + " Objective function value: 17691.188228\n", + " Number of iterations: 8723\n", + " Number of functions evaluations: 9173\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEftJREFUeJzt3X1s3fV1x/HPsa8d23mObWiwozl0\nNIyibKDQptB1XSksAxT4YxK0Zc2ALerWjYBaUVCkof4xqVKrQqd1MMZD0YjoJEpbxCglokBXbc0I\ngYSEAAltIIFATBAJtmtfP5z9cW++MpYd+557f7/rwPslRfHDPT4/X9sf/+71PfeYuwsAJKmh3gcA\nYPYgEAAkBAKAhEAAkBAIABICAUBS90AwszVm9pKZ7TWzGzPutczMnjCz3Wa2y8w2ZNlvXN9GM3vW\nzB7OodciM3vAzF4sf56fyrjf9eXrcqeZ3W9mLTX++Heb2SEz2znubUvMbLOZ7Sn/vzjjft8uX587\nzOzHZrYoy37j3vd1M3Mz66hVv+nUNRDMrFHS9yX9uaQzJH3BzM7IsOWIpK+5+x9IWi3pqxn3O2aD\npN059JGk70l61N1Pl/SHWfY1sy5J10pa5e5nSmqUdEWN2/xA0poJb7tR0uPufpqkx8uvZ9lvs6Qz\n3X2lpJcl3ZRxP5nZMkkXSHqthr2mVe8zhE9I2uvuv3H3oqQfSro0q2buftDdt5Vffk+lH5aurPpJ\nkpl1S7pY0p1Z9in3WiDpM5LukiR3L7r7uxm3LUhqNbOCpDZJb9Tyg7v7LyW9M+HNl0q6t/zyvZIu\ny7Kfuz/m7iPlV38tqTvLfmW3SLpBUq6PHKx3IHRJ2j/u9QPK+Af0GDPrkXSWpC0Zt7pVpS/sWMZ9\nJOlUSb2S7infRLnTzOZm1czdX5f0HZV+ix2UdMTdH8uq3zgnu/vB8jEclHRSDj2PuVrSz7JsYGZr\nJb3u7tuz7DOZegeCTfK2zBPRzOZJ+pGk69z9aIZ9LpF0yN2fyarHBAVJZ0u6zd3PktSv2p5Ov0/5\ntvulkpZLOkXSXDO7Mqt+9WZmG1W62bkpwx5tkjZK+sesehxPvQPhgKRl417vVo1POScysyaVwmCT\nuz+YZS9J50laa2b7VLo59Dkzuy/DfgckHXD3Y2c9D6gUEFn5vKTfunuvuw9LelDSuRn2O+YtM1sq\nSeX/D2Xd0MzWSbpE0pc82wGgj6oUsNvL3zfdkraZ2Ucy7JnUOxCelnSamS03s2aV7pB6KKtmZmYq\n3b7e7e7fzarPMe5+k7t3u3uPSp/bL9w9s9+g7v6mpP1mtqL8pvMlvZBVP5VuKqw2s7bydXu+8rnz\n9CFJ68ovr5P00yybmdkaSd+QtNbdB7Ls5e7Pu/tJ7t5T/r45IOns8tc2e+5e13+SLlLpnttXJG3M\nuNenVbpJskPSc+V/F+X0eX5W0sM59PkjSVvLn+NPJC3OuN83Jb0oaaek/5A0p8Yf/36V7p8YVumH\n4xpJ7Sr9dWFP+f8lGffbq9J9Xce+Z27Pst+E9++T1JH1982xf1ZuCgB1v8kAYBYhEAAkBAKAhEAA\nkBAIAJJZEwhmtp5+9JttvT4M/cabNYEgKe8rgX4nbr8P8udWj37JbAoEAHWW6wOTOpY0es+ypknf\n13t4VJ3tjZO+b9ehzlC/xsGpP7eRoX4V5kw+CDjaMtnM1fT8OGWjA/1qbJti8DAYy8f7yo3196th\n7uT9CoOxfoWB0SnfVxwZUHOhbfJ3Rr/HbPIrtDjSr+bCcYY4g/0G2wuTvv1416UkNffF+o0VJv/8\nRgb7VWiZul/jYOWDs4ND76o43D/tN/bk10BGepY16f9+vmz6C05w5j//Xajfkt0j019oEu+cHrta\nxibPummNtga/oSbPz2kteilW1/HskVCdFWNfB2+IJaUNx/rtuTr2i6frqVi/300RQNNZtKe/4pot\nO26f0eW4yQAgqSoQ8nw+RADZCwdCHZ4PEUDGqjlDyPX5EAFkr5pAqNvzIQLIRjWBMKPnQzSz9Wa2\n1cy29h6e+s9WAOqvmkCY0fMhuvsd7r7K3VdN9TgDALNDNYGQ6/MhAshe+IFJ7j5iZn8v6ecqbey5\n29131ezIAOSuqkcquvsjkh6p0bEAqDMeqQggyXWWYdehztBcws5r/zXU74LLrwrVzX0jdudnYzE4\nVLM4NkzVEPyjzUhrrN/eLy4M1S0Obobo644d57z9sa+DBWew+rpiP0bDbbHPr7+7teKasRdn9ruf\nMwQACYEAICEQACQEAoCEQACQEAgAEgIBQEIgAEgIBAAJgQAgIRAAJAQCgIRAAJDkOu3YOOihbUrR\nqcXN/3lPqO7iT18WqhtbMMUqs2kUeuaF6gY6YlOZ0dVjC14JlWlwSWyqb/Dk2Djn4MeGQ3WnfXlb\nqK645pxQ3asXx34fn/RcseKahuGZrX/jDAFAQiAASAgEAEk1q9yWmdkTZrbbzHaZ2YZaHhiA/FVz\np+KIpK+5+zYzmy/pGTPb7O7BJ8wCUG/hMwR3P+ju28ovvydpt1jlBpzQanIfgpn1SDpL0pZafDwA\n9VF1IJjZPEk/knSdux+d5P1pt+PIUH+17QBkqKpAMLMmlcJgk7s/ONllxu92LMyZW007ABmr5q8M\nJukuSbvd/bu1OyQA9VLNGcJ5kv5S0ufM7Lnyv4tqdFwA6qCaZa+/khR7kDqAWYlHKgJIcp12HG0x\nvXN65S2juxajU4v/9aufhOp+f9Pfhurm74udaDX1x6YWhxYGpw87Y/1aekNl6tga+33VMNocqnvv\n8tWhusVPvxmqW/FkrM6WL6u8ZoRpRwAVIhAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAk\nBAKAhEAAkBAIAJJ
cpx3dpLGmyusai7Epu+iuxejU4t4v3Raq++SNsX7F+bGpxeKiUJk6ts9sYq5W\nmo9UvgdUkrwQ+z33Xlfsx6F/RWeo7uiFS0N17bsGK67x12Z2nXCGACAhEAAkBAKApBZ7GRrN7Fkz\ne7gWBwSgfmpxhrBBpTVuAE5w1S5q6ZZ0saQ7a3M4AOqp2jOEWyXdICnfv0cByEQ1m5sukXTI3Z+Z\n5nJpt+PoALsdgdms2s1Na81sn6QfqrTB6b6JFxq/27Gxjd2OwGwWDgR3v8ndu929R9IVkn7h7lfW\n7MgA5I7HIQBIajLL4O5PSnqyFh8LQP1whgAgyXXaUQ3SaGvlk4uDi2NTfYWeeaG66K7F6NTilm/F\npiT/9Kq/DtXNeWcoVPf2ytj12RAbWtTgktiOxrbe0VCdB38aigtju0db3o1N8R5d3lJxzeguph0B\nVIhAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABI8t3tKGksMBjWEBte\n00BHbAqtqT82hRbdtRidWnzintiTXZ97/VdCdQv2D4fqbDR2fTYUY8/d23Q49tydb5zXHqrreaQY\nqju4uvKpRUnqeqryz69xcGbXJWcIABICAUBCIABIqt3ctMjMHjCzF81st5l9qlYHBiB/1d6p+D1J\nj7r7X5hZs6S2GhwTgDoJB4KZLZD0GUl/JUnuXpQUu7sVwKxQzU2GUyX1SrqnvA7+TjNjNRNwAqsm\nEAqSzpZ0m7ufJalf0o0TLzR+t+NYP7sdgdmsmkA4IOmAu28pv/6ASgHxPuN3OzbM5QQCmM2q2e34\npqT9Zrai/KbzJb1Qk6MCUBfV/pXhHyRtKv+F4TeSrqr+kADUS1WB4O7PSVpVo2MBUGc8UhFAkuu0\nY2FQWvRS5XUjrbEpwua+2JTd0MJYv+KiUFl412J0avF/brk9VHfOxtjuyoX7Yp/f4Y/HpgEbRmJ1\n7TtCZeo7JbaDsnNHbHq0b1lrxTVjL7DbEUCFCAQACYEAICEQACQEAoCEQACQEAgAEgIBQEIgAEgI\nBAAJgQAgIRAAJAQCgCTfaceBUXU8e6Tiur1fXBjqt+CVUJkGO2NTkh3bY7sI3145L1QX3bUYnVp8\n+p9uC9VddzD2lBmNil2fQ2NNobqXN5we69c+J1Q33Bb7fVwYClwvPrPvac4QACQEAoCEQACQVLvb\n8Xoz22VmO83sfjOLPVUNgFkhHAhm1iXpWkmr3P1MSY2SrqjVgQHIX7U3GQqSWs2soNKi1zeqPyQA\n9VLNopbXJX1H0muSDko64u6P1erAAOSvmpsMiyVdKmm5pFMkzTWzKye5XNrtWBwZiB8pgMxVc5Ph\n85J+6+697j4s6UFJ50680Pjdjs2FtiraAchaNYHwmqTVZtZmZqbSbsfdtTksAPVQzX0IW1Ta+LxN\n0vPlj3VHjY4LQB1Uu9vxZkk31+hYANQZj1QEkOQ67Sh3WXGk4rLFL8TaDS6J7Whs6Y31i2qo/CqR\nJNlobCozumsxOrV469Ktobpv9p4Rqutujj0cZs/YilDdWCH2fWaxL5/6ljZWXDPWNLNj5AwBQEIg\nAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAk+U47mskbKs+gvu7YNNng\nyaOhuo6tsZxsPhIbWxxc0hyqayjGdh8e/nhsfUZ012J0avHmztiYa7Rf4a3K945K0sDKuaG6xtjQ\nqVoPVz4mOdOJWs4QACQEAoCEQACQTBsIZna3mR0ys53j3rbEzDab2Z7y/4uzPUwAeZjJGcIPJK2Z\n8LYbJT3u7qdJerz8OoAT3LSB4O6/lPTOhDdfKune8sv3SrqsxscFoA6i9yGc7O4HJan8/0m1OyQA\n9ZL54xDMbL2k9ZLU0rQg63YAqhA9Q3jLzJZKUvn/Q1Nd8P27HWMP4ACQj2ggPCRpXfnldZJ+WpvD\nAVBPM/mz4/2S/lfSCjM7YGbXSPqWpAvMbI+kC8qvAzjBTXsfgrt/YYp3nV/jYwFQZzxSEUCS/27H\n4conAuftjy3BG/zYcKiuYTQ2feiFWL629camMpsO94fqGkZi045DY02huuiuxbynJC/q6wrVNfcF\nlzQGDc2vfPp3bIbrIDlDAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABI\nCAQASa7TjoPtBe25urPiOgsOk5325W2huvcuXx2r64pdnR78KrxxXnuorn1HrN/LG04P1e0ZWxGq\ni+5ajE4tPrJ9c6juj796aqhupCW2s7S4qPI6Z9oRQKUIBAAJgQAgie52/LaZvWhmO8zsx2a2KNvD\nBJCH6G7HzZLOdPeVkl6WdFONjwtAHYR2O7r7Y+5+7MkRfy2pO4NjA5CzWtyHcLWkn031TjNbb2Zb\nzWzrWH/sSUEB5KOqQDCzjZJGJG2a6jLjV7k1zGWVGzCbhR+YZGbrJF0i6Xx3z/d5qAFkIhQIZrZG\n0jck/Ym7D9T2kADUS3S3479Imi9ps5k9Z2a3Z3ycAHIQ3e14VwbHAqDOeKQigCTXacfmPlfXU5Xv\nduwLThEW15wTqlv89Juhuv4VlU9ySlJx4QxH0SboeaQYqus7Jba7cqh9TqhurBCb6htYGfurVHTX\nYnRq8b+//2+hujVrrwzVmc+ruKZxhmtOOUMAkBAIABICAUBCIABICAQACYEAICEQACQEAoCEQACQ\nEAgAEgIBQEIgAEgIBABJrtOOYwXT79orbzncFpuWe/XiWN6teDI27Xj0wqWhupZ3Y9N5B1e3hOo6\nd8xw9G2C4bbY9Rndzdk4FKuLiu5ajE4tPvrQfaG6z17zNxXX2MjMvgicIQBICAQASWiV27j3fd3M\n3Mw6sjk8AHmKrnKTmS2TdIGk12p8TADqJLTKrewWSTdIYicD8AERug/BzNZKet3dt9f4eADUUcV/\nAzSzNkkbJV04w8uvl7RekprnLq60HYAcRc4QPippuaTtZrZPpc3P28zsI5NdePxux0ILux2B2azi\nMwR3f17SScdeL4fCKnd/u4bHBaAOoqvcAHwARVe5jX9/T82OBkBd8UhFAAmBACAx9/weV7RgXpd/\ncuVXKq7r724N9Wvtje0+bDrUF6ob7qh8554kHV0em1pcuHcgVNe3LHZ9FobGYv2WxnZXth6OfW8O\nzY9NLRYXxermvR67XpqPjIbqnrzr3yuu+cSf7dfW7YPTfoKcIQBICAQACYEAICEQACQEAoCEQACQ\nEAgAEgIBQEIgAEgIBAAJgQAgIRAAJAQCgCTXaUcz65X06hTv7pCU59Ow0e/E7fdB/tyy6vd77t45\n3YVyDYTjMbOt7r6KfvSbTb0+DP3G4yYDgIRAAJDMpkC4g370m4W9Pgz9kllzHwKA+ptNZwgA6oxA\nAJAQCAASAgFAQiAASP4f9ExjOWXvAGoAAAAASUVORK5CYII=\n", + 
"text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEiBJREFUeJzt3XuMnPV1xvHn7O6Ml7XX2Mb3S1hz\nCZSiNLYsIKQyBELjGASRiFTS0LpNJFRoGkBEBESTqFX/iJQ0BKmNEQUS1DikEpAGcbcICCESCCzm\nZruYYrB3bWNDjC+79s7O7ukfM/5lvVp7d87MOzOG70da7W3Onncu++w7776/OebuAgBJamn0BgBo\nHgQCgIRAAJAQCAASAgFAQiAASBoeCGa2wsz+18zeMrObMu61yMyeMrMNZvaGmV2bZb8RfVvN7GUz\ne6gOvaaZ2X1mtrF8PT+Tcb/ry7fl62Z2r5m11/jn321mO83s9RFfm2Fma81sU/n99Iz7/aB8e75q\nZr8ys2lZ9hvxvW+ZmZvZzFr1G09DA8HMWiX9h6QvSjpD0lfM7IwMWxYl3eDufyLpHEn/kHG/Q66V\ntKEOfSTpNkmPufvpkv4sy75mtkDSNyUtc/czJbVKuqLGbX4macWor90k6Ul3P1XSk+XPs+y3VtKZ\n7v4pSW9KujnjfjKzRZIukrSlhr3G1eg9hLMkveXub7t7QdIvJV2WVTN33+7u3eWP96n0y7Igq36S\nZGYLJV0s6c4s+5R7TZW0XNJdkuTuBXf/MOO2bZKOM7M2SR2SttXyh7v7M5L+MOrLl0m6p/zxPZK+\nlGU/d3/C3YvlT38naWGW/cpulXSjpLqeOdjoQFggaeuIz3uU8S/oIWbWJWmJpOczbvVjle7Y4Yz7\nSNJJknZJ+mn5KcqdZjY5q2bu3ivphyr9FdsuaY+7P5FVvxHmuPv28jZslzS7Dj0P+ZqkR7NsYGaX\nSup191ey7DOWRgeCjfG1zBPRzKZIul/Sde6+N8M+l0ja6e4vZdVjlDZJSyWtdvclkvpU293pw5Sf\nu18mabGk+ZImm9mVWfVrNDO7RaWnnWsy7NEh6RZJ382qx9E0OhB6JC0a8flC1XiXczQzy6kUBmvc\n/YEse0n6rKRLzewdlZ4OXWBmP8+wX4+kHnc/tNdzn0oBkZXPS9rs7rvcfVDSA5LOzbDfIe+Z2TxJ\nKr/fmXVDM1sl6RJJX/VsFwCdrFLAvlJ+3CyU1G1mczPsmTQ6EH4v6VQzW2xmeZUOSD2YVTMzM5We\nX29w9x9l1ecQd7/Z3Re6e5dK1+037p7ZX1B33yFpq5mdVv7ShZLWZ9VPpacK55hZR/m2vVD1OXj6\noKRV5Y9XSfp1ls3MbIWkb0u61N37s+zl7q+5+2x37yo/bnokLS3ft9lz94a+SVqp0pHb/5N0S8a9\n/lylpySvSlpXfltZp+t5vqSH6tDn05JeLF/H/5E0PeN+/yxpo6TXJf2XpEk1/vn3qnR8YlClX46v\nSzpBpf8ubCq/n5Fxv7dUOtZ16DFze5b9Rn3/HUkzs37cHHqzclMAaPhTBgBNhEAAkBAIABICAUBC\nIABImiYQzOwq+tGv2Xp9HPqN1DSBIKneNwL9jt1+H+Xr1oh+STMFAoAGq+uJSTNntHrXotyY39v1\nwZBmndA65vfWb5sV6mdHuWrFg31qax97IaCPteRqQg2P0u9An9qOG7vf8Ng3ybhyfUe+goOFPuXy\nY/ezodh97nbkKzg42Kdcbux+LYXimF8f1xH6FYb6lW/tiP3Mo/Dc2I+/QqFP+SPclpLk0T+rR7gb\njnbfSVJL/0DFrQ4M7VNh+OC4j+y2in9yFboW5fTC44vGv+Aoy757dahfayFUpmLwNX+G22JJ0j8v\n9gs694WhUF1+d+yGGc6P/QsznklbxlruPwH5YFIeJbiOpjA7tlJ8cErs16h1ILYivr17c8U1v919\n/4Qux1MGAElVgVDP10MEkL1wIDTg9RABZKyaPYS6vh4igOxVEwgNez1EANmoJhAm9HqIZnaVmb1o\nZi/u+iB2VBxAfVQTCBN6PUR3v8Pdl7n7siOdZwCgOVQTCHV9PUQA2QufmOTuRTP7hqTHVZrYc7e7\nv1GzLQNQd1Wdqejuj0h6pEbbAqDBOFMRQFLXtQzrt80KrUt48V9Wh/qtPP/yUF3vF+eE6qZujf0X\npdgRO9h6IHiQNr87VKaeC/Khuvb354XqhoJrSjp2xNaG9M2LrYGY8/vKFxtJ0oenxG5P6/pkxTWD\n90/sxmQPAUBCIABICAQACYEAICEQACQEAoCEQACQEAgAEgIBQEIgAEgIBAAJgQAgIRAAJHVd7Wge\nm6YUXbX4yNMTm1Yz2oqLvxqq80mx1YedGw+G6voXHx+qK0yLrbLrfDdUptz+2ISigeNjqw/3nBwq\nU9d3ngvVDZ2/NFQ3fVNsglb75g8qrnl73+CELsceAoCEQACQEAgAkmpGuS0ys6fMbIOZvWFm19Zy\nwwDUXzUHFYuSbnD3bjPrlPSSma119/U12jYAdRbeQ3D37e7eXf54n6QNYpQbcEyryTEEM+uStETS\n87X4eQAao+pAMLMpku6XdJ277x3j+2m2Y/FgX7XtAGSoqkAws5xKYbDG3R8Y6zIjZzu2tU+uph2A\njFXzXwaTdJekDe7+o9ptEoBGqWYP4bOS/lrSBWa2rvy2skbbBaABqhn2+qyk2MnmAJoSZyoCSOq6\n2tFNKgbm9UVnLUZXLT728JpQ3bnX/32obmBa7GDrrO79obrWnl2hum3XLQ7VDc2NrerL9UyK9ZsU\nm+0Yld+2J1bose0cmjGl8lbbJ/a3nz0EAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABICAQACYEA\nICEQACQEAoCEQACQ1HW1o0wabqv8JRSmbh0KtYvOWoyuWnzu1ttDdWffdHWoTsXYzMRtl58Uqpv/\nbDFU1zIYux+G2mP3e25vrK7v8rNDdW39sfvhwKzYr9+U3spXj3orqx0BVIhAAJAQCACSWsxlaDWz\nl83soVpsEIDGqcUewrUqjXEDcIyrdlDLQkkXS7qzNpsDoJGq3UP4saQbJcX+7wKgqVQzuekSSTvd\n/aVxLvfH2Y4HmO0INLNqJzddambvSPqlShOcfj76QofNdjyO2Y5AMwsHgrvf7O4L3b1L0hWSfuPu\nV9ZsywDUHechAEhqspbB3Z+W9HQtfhaAxmEPAUBS19WOwzmpf17l8+yKHbHVcp0bD4bqorMWo6sW\nn//+6lDdxUu/EKqb859vhere/5uloToFRy3m98cKO9ftCNXZ4pmhOm+JDUHvmx8dnp6vuGI4P7Fe\n7CEASA
gEAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABICAQACYEAICEQACQEAoCkrqsdc32uuS9U\nPnfvwAmx1Y79i48P1c3q3h+qi85ajK5afLj78VDdGauvCdV94tG9obpiZ+Wr8yRpYHouVDc4b3qo\nbsdZ7aG6E3+xJVRXXP6JUF3He4MV17QMTmzlKHsIABICAUBCIABIqp3cNM3M7jOzjWa2wcw+U6sN\nA1B/1R5UvE3SY+7+ZTPLS+qowTYBaJBwIJjZVEnLJf2tJLl7QVKhNpsFoBGqecpwkqRdkn5aHgd/\np5kxmgk4hlUTCG2Slkpa7e5LJPVJumn0hUbOdhwsMNsRaGbVBEKPpB53f778+X0qBcRhRs52zOXZ\ngQCaWTWzHXdI2mpmp5W/dKGk9TXZKgANUe1/Gf5R0pryfxjelvR31W8SgEapKhDcfZ2kZTXaFgAN\nxpmKAJK6rna0IVd+d+WnKuR3x/oVpsVW2bX27ArVbbv8pFBddNZidNXi+qt/Eqpb+cBfhupaX3oj\nVDfpvCWhuuLk2MPaY4tqVeiaFaqb+WrlK38lydsCMyEnWMIeAoCEQACQEAgAEgIBQEIgAEgIBAAJ\ngQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASMx9YjPfaqFz6kJfdtY3Kq7ruSC2arHz3VCZ9pwSq5v/\nbGz1Wt/c2DK7mS/vC9W1HKh8NqAkPbL2v0N1f7X5c6G6ztzBUF1v/7RQ3fA1naG6wpwpobr+2bHH\n9ZStByqueWHdau3d3zvumkf2EAAkBAKAhEAAkFQ72/F6M3vDzF43s3vNrL1WGwag/sKBYGYLJH1T\n0jJ3P1NSq6QrarVhAOqv2qcMbZKOM7M2lQa9bqt+kwA0SjWDWnol/VDSFknbJe1x9ydqtWEA6q+a\npwzTJV0mabGk+ZImm9mVY1zuj7MdB5ntCDSzap4yfF7SZnff5e6Dkh6QdO7oCx022zHHbEegmVUT\nCFsknWNmHWZmKs123FCbzQLQCNUcQ3hepYnP3ZJeK/+sO2q0XQAaoNrZjt+T9L0abQuABuNMRQBJ\nXWc7thSKmrTlDxXXtb8/L9Qvt384VDc0t/L5k5LUMhgcDhhccFrsDM6uDM5ajK5a/MXip0J1//r+\n6aG6XQdjqw8H9uwP1dkJsYPlgx2BGY2S+hZUfkLw8PqJ/e1nDwFAQiAASAgEAAmBACAhEAAkBAKA\nhEAAkBAIABICAUBCIABICAQACYEAICEQACR1Xe0oMymfq7hsKDjtYeD42GqyXM+kUN1Qe2y2Y35/\nbLnjwPTKb0tJmnTeklBdZy72gljRVYv/NHNjqO6GwY5Q3YapJ4fq+ucGV50WYvf7wRmV/x0fnuBv\nOnsIABICAUBCIABIxg0EM7vbzHaa2esjvjbDzNaa2aby++nZbiaAepjIHsLPJK0Y9bWbJD3p7qdK\nerL8OYBj3LiB4O7PSBr9QoiXSbqn/PE9kr5U4+0C0ADRYwhz3H27JJXfz67dJgFolMwPKo6c7VgY\n6s+6HYAqRAPhPTObJ0nl9zuPdMGRsx3zrbETRgDURzQQHpS0qvzxKkm/rs3mAGikifzb8V5Jv5V0\nmpn1mNnXJX1f0kVmtknSReXPARzjxj3D2d2/coRvXVjjbQHQYJypCCCp72pHqbTisUIdO2KrwvbE\nFq9paFKsX25vbLVj57odobrBebETRIuTY3d7b/+0UF101mJ01eK/zesO1V18YGGornUw9njp3NwX\nqvvw9MpvT5vgQ5M9BAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAhEAA\nkNR1taPnWlWYPbniur55sRmNXd95LlQX1Xf52aE6WzwzVLfjrNjQS28NlenEazpDdQN79ofqorMW\no6sWH37uwVDd8muuCtX1XjA1VNdSqLyG2Y4AKkYgAEgIBABJdLbjD8xso5m9ama/MrPYS+kAaCrR\n2Y5rJZ3p7p+S9Kakm2u8XQAaIDTb0d2fcPdi+dPfSYod1gXQVGpxDOFrkh490jcPG+VWiL2oJID6\nqCoQzOwWSUVJa450mcNGueUrPwcBQP2ET0wys1WSLpF0obvHXocaQFMJBYKZrZD0bUnnuTsjnYGP\niOhsx3+X1ClprZmtM7PbM95OAHUQne14VwbbAqDBOFMRQGL1PB7YOW2hf/q8ayuuaz0Qm5nYUoxd\nt/y2PaG6/pNnhOpaC8OhuvY33wvVFbpmheq8Lbbq1IL3Q//cfKguOmsx6pmf3BGqW/m5L4fq9v3p\nCRXXvPLkbdq/e+u4dyB7CAASAgFAQiAASAgEAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABICAQA\nCYEAIKnrbEe51DpQ+cq+D0+JrXqbvikwBE+SgitAD8yK3Zx982OrCIvLPxGqm/lqbPXocHC142BH\nrK61ELsfOjfHXsw3OmsxumrxkafuC9UtvzoyS3JityV7CAASAgFAEhrlNuJ73zIzN7PYPHMATSU6\nyk1mtkjSRZK21HibADRIaJRb2a2SbtREj1YAaHqhYwhmdqmkXnd/pcbbA6CBKv4/mZl1SLpF0l9M\n8PJXSbpKkia1MzUeaGaRPYSTJS2W9IqZvaPS5OduM5s71oVHznbMMdsRaGoV7yG4+2uSZh/6vBwK\ny9z9/RpuF4AGiI5yA/ARFB3lNvL7XTXbGgANxZmKABICAUBS19WOLf0Dau/eXHGddX0y1K998weh\nuqEZU0J1U3qDqysVW83Z8d5gqC46ozG/O3b9+ha0h+oOzoj9vfrw9Nj91xK8+yKzFqXoqkXpmdWV\nz5I86wsTO+bPHgKAhEAAkBAIABICAUBCIABICAQACYEAICEQACQEAoCEQACQEAgAEgIBQEIgAEjM\ng3MMQ83Mdkl69wjfnimpni/DRr9jt99H+bpl1e9Ed5813oXqGghHY2Yvuvsy+tGvmXp9HPqNxFMG\nAAmBACBppkCo/GVg6Pdx7fdRvm6N6Jc0zTEEAI3XTHsIABqMQACQEAgAEgIBQEIgAEj+H7HwZvMK\n5HNFAAAAAElFTkSuQmCC\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from brainiak.matnormal import MNRSA\n", + "from sklearn.linear_model import LinearRegression\n", + "\n", + "# beta_series RSA\n", + "model_linreg = LinearRegression(fit_intercept=False)\n", + "model_linreg.fit(true_X, true_Y)\n", + "beta_series = model_linreg.coef_\n", + "naive_RSA = 
np.corrcoef(beta_series.T)\n", + "\n", + "# MN-RSA\n", + "model_matnorm = MNRSA(time_cov=time_cov,\n", + " space_cov=space_cov, n_nureg=3)\n", + "\n", + "model_matnorm.fit(true_Y, true_X)\n", + "\n", + "# very similar on this toy data but in real settings may be more different. \n", + "plt.matshow(model_matnorm.C_)\n", + "plt.matshow(naive_RSA)" + ] + } + ], + "metadata": { + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python [conda env:brainiak]", + "language": "python", + "name": "conda-env-brainiak-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py new file mode 100644 index 000000000..0ba0cab1e --- /dev/null +++ b/tests/matnormal/test_cov.py @@ -0,0 +1,296 @@ +import numpy as np +from numpy.testing import assert_allclose +from scipy.stats import norm, wishart, invgamma +from brainiak.matnormal.covs import * +import tensorflow as tf +import pytest +import logging + +logging.basicConfig(level=logging.DEBUG) + +# X is m x n, so A sould be m x p + +m = 8 +n = 4 +p = 3 + +rtol = 1e-7 +atol = 1e-7 + +def logdet_sinv_np(X, sigma): + # logdet + _, logdet_np = np.linalg.slogdet(sigma) + # sigma-inv + sinv_np = np.linalg.inv(sigma) + # solve + sinvx_np = np.linalg.solve(sigma, X) + return logdet_np, sinv_np, sinvx_np + + +def logdet_sinv_np_mask(X, sigma, mask): + mask_indices = np.nonzero(mask)[0] + # logdet + _, logdet_np = np.linalg.slogdet(sigma[np.ix_(mask_indices, mask_indices)]) + # sigma-inv + sinv_np_ = np.linalg.inv(sigma[np.ix_(mask_indices, mask_indices)]) + # sigma-inverse * + sinvx_np_ = sinv_np_.dot(X[mask_indices,:]) + + sinv_np = np.zeros_like(sigma) + sinv_np[np.ix_(mask_indices, mask_indices)] = sinv_np_ + sinvx_np = np.zeros_like(X) + sinvx_np[mask_indices, :] = sinvx_np_ + + return logdet_np, sinv_np, sinvx_np + + +X = norm.rvs(size=(m, n)) +X_tf = tf.constant(X) +A = norm.rvs(size=(m, p)) +A_tf = tf.constant(A) + + +def test_CovConstant(): + + cov_np = wishart.rvs(df=m+2, scale=np.eye(m)) + cov = CovUnconstrainedCholesky(m, cov_np) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + + +def test_CovIdentity(): + + cov = CovIdentity(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = np.eye(m) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + + +def test_CovIsotropic(): + + cov = CovIsotropic(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = cov.sigma.eval(session=sess) 
* np.eye(cov.size) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + + +def test_CovDiagonal(): + + cov = CovDiagonal(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = np.diag(1/cov.prec.eval(session=sess)) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + +def test_CovDiagonal_initialized(): + + cov_np = np.diag(np.exp(np.random.normal(size=m))) + cov = CovDiagonal(size=m, sigma=np.diag(cov_np)) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + + +def test_CovDiagonalGammaPrior(): + + cov_np = np.diag(np.exp(np.random.normal(size=m))) + cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, beta=1e-10) + + ig = invgamma(1.5, scale=1e-10) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + penalty_np = np.sum(ig.logpdf(1/np.diag(cov_np))) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(penalty_np, cov.logp.eval(session=sess), rtol=rtol) + + +def test_CovUnconstrainedCholesky(): + + cov = CovUnconstrainedCholesky(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = cov.Sigma.eval(session=sess) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + + +def test_CovUnconstrainedCholeskyWishartReg(): + + cov = CovUnconstrainedCholeskyWishartReg(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = cov.Sigma.eval(session=sess) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + # now compute the regularizer + reg = wishart.logpdf(cov_np, df=m+2, scale=1e10 * np.eye(m)) + assert_allclose(reg, cov.logp.eval(session=sess), rtol=rtol) + +def 
test_CovUnconstrainedInvCholesky(): + + cov = CovUnconstrainedInvCholesky(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = cov.Sigma.eval(session=sess) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + +def test_Cov2FactorKron(): + assert(m%2 == 0) + dim1 = int(m/2) + dim2 = 2 + + with pytest.raises(TypeError) as excinfo: + cov = CovKroneckerFactored(sizes=dim1) + assert "sizes is not a list" in str(excinfo.value) + + cov = CovKroneckerFactored(sizes=[dim1, dim2]) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + L1 = (cov.L[0]).eval(session=sess) + L2 = (cov.L[1]).eval(session=sess) + cov_np = np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + +def test_Cov3FactorKron(): + assert(m%4 == 0) + dim1 = int(m/4) + dim2 = 2 + dim3 = 2 + cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3]) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + L1 = (cov.L[0]).eval(session=sess) + L2 = (cov.L[1]).eval(session=sess) + L3 = (cov.L[2]).eval(session=sess) + cov_np = np.kron(np.kron(np.dot(L1, L1.transpose()),\ + np.dot(L2, L2.transpose())), np.dot(L3, L3.transpose())) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + +def test_Cov3FactorMaskedKron(): + assert(m%4 == 0) + dim1 = int(m/4) + dim2 = 2 + dim3 = 2 + + mask = np.random.binomial(1, 0.5, m).astype(np.int32) + + if sum(mask == 0): + mask[0] = 1 + mask_indices = np.nonzero(mask)[0] + + cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3], mask=mask) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + L1 = (cov.L[0]).eval(session=sess) + L2 = (cov.L[1]).eval(session=sess) + L3 = (cov.L[2]).eval(session=sess) + cov_np_factor = np.kron(L1, np.kron(L2, L3))[np.ix_(mask_indices, mask_indices)] + cov_np = np.dot(cov_np_factor, cov_np_factor.transpose()) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices,:], cov_np) + + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol, atol=atol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess)[np.ix_(mask_indices, mask_indices)], rtol=rtol, atol=atol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess)[mask_indices,:], rtol=rtol, atol=atol) + + +def test_CovAR1(): + + cov = CovAR1(size=m) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + 
cov_np = np.linalg.inv(cov.Sigma_inv.eval(session=sess)) + + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + + + +def test_CovAR1_scan_onsets(): + + cov = CovAR1(size=m, scan_onsets=[0, m//2]) + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(cov.get_optimize_vars())) + # compute the naive version + cov_np = np.linalg.inv(cov.Sigma_inv.eval(session=sess)) + + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py new file mode 100644 index 000000000..95c27127f --- /dev/null +++ b/tests/matnormal/test_matnormal_logp.py @@ -0,0 +1,56 @@ +import numpy as np +from numpy.testing import assert_allclose +from scipy.stats import multivariate_normal +import tensorflow as tf +from brainiak.utils.brsa_gendata import rmn +from brainiak.matnormal.matnormal_likelihoods import matnorm_logp +from brainiak.matnormal.covs import CovIdentity,CovUnconstrainedCholesky +import logging + +logging.basicConfig(level=logging.DEBUG) + +# X is m x n, so A sould be m x p + +m = 5 +n = 4 +p = 3 + +rtol = 1e-7 + + +def test_against_scipy_mvn_row(): + + with tf.Session() as sess: + + rowcov = CovUnconstrainedCholesky(size=m) + colcov = CovIdentity(size=n) + X = rmn(np.eye(m), np.eye(n)) + X_tf = tf.constant(X, 'float64') + + sess.run(tf.global_variables_initializer()) + + rowcov_np = rowcov.Sigma.eval(session=sess) + + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), + rowcov_np)) + tf_answer = matnorm_logp(X_tf, rowcov, colcov) + assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + + +def test_against_scipy_mvn_col(): + + with tf.Session() as sess: + + rowcov = CovIdentity(size=m) + colcov = CovUnconstrainedCholesky(size=n) + X = rmn(np.eye(m), np.eye(n)) + X_tf = tf.constant(X, 'float64') + + sess.run(tf.global_variables_initializer()) + + colcov_np = colcov.Sigma.eval(session=sess) + + scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), + colcov_np)) + tf_answer = matnorm_logp(X_tf, rowcov, colcov) + assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py new file mode 100644 index 000000000..ad373b463 --- /dev/null +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -0,0 +1,87 @@ +import numpy as np +from numpy.testing import assert_allclose +from scipy.stats import wishart, multivariate_normal +import tensorflow as tf +from brainiak.utils.brsa_gendata import rmn +from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_conditional_col, matnorm_logp_conditional_row +from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky +import logging + +logging.basicConfig(level=logging.DEBUG) + +# X is m x n, so A sould be m x p + +m = 5 +n = 4 +p = 3 + +rtol = 1e-7 + + +def test_against_scipy_mvn_col_conditional(): + + # have to be careful for constructing everything as a submatrix of a big + # PSD matrix, else no guarantee that anything's invertible. 
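+    # The blocks of this joint draw give the row covariance (upper-left),
+    # the cross term A (upper-right), and Q (lower-right); the conditional
+    # row covariance checked below is the Schur complement
+    # Sigma_row - A Q^{-1} A^T.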
+ cov_np = wishart.rvs(df=m+p+2, scale=np.eye(m+p)) + + # rowcov = CovConstant(cov_np[0:m, 0:m]) + rowcov = CovUnconstrainedCholesky(size=m, Sigma=cov_np[0:m, 0:m]) + A = cov_np[0:m, m:] + + colcov = CovIdentity(size=n) + + Q = CovUnconstrainedCholesky(size=p, Sigma=cov_np[m:, m:]) + + X = rmn(np.eye(m), np.eye(n)) + + A_tf = tf.constant(A, 'float64') + X_tf = tf.constant(X, 'float64') + + with tf.Session() as sess: + + sess.run(tf.global_variables_initializer()) + + Q_np = Q.Sigma.eval(session=sess) + + rowcov_np = rowcov.Sigma.eval(session=sess) - \ + A.dot(np.linalg.inv(Q_np)).dot((A.T)) + + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), + rowcov_np)) + + tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q) + assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + + +def test_against_scipy_mvn_row_conditional(): + + # have to be careful for constructing everything as a submatrix of a big + # PSD matrix, else no guarantee that anything's invertible. + cov_np = wishart.rvs(df=m+p+2, scale=np.eye(m+p)) + + rowcov = CovIdentity(size=m) + colcov = CovUnconstrainedCholesky(size=n, Sigma=cov_np[0:n, 0:n]) + A = cov_np[n:, 0:n] + + Q = CovUnconstrainedCholesky(size=p, Sigma=cov_np[n:, n:]) + + X = rmn(np.eye(m), np.eye(n)) + + A_tf = tf.constant(A, 'float64') + X_tf = tf.constant(X, 'float64') + + with tf.Session() as sess: + + sess.run(tf.global_variables_initializer()) + + Q_np = Q.Sigma.eval(session=sess) + + colcov_np = colcov.Sigma.eval(session=sess) - \ + A.T.dot(np.linalg.inv(Q_np)).dot((A)) + + scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), + colcov_np)) + + tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q) + + assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py new file mode 100644 index 000000000..57436f4d8 --- /dev/null +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -0,0 +1,75 @@ +import numpy as np +from numpy.testing import assert_allclose +from scipy.stats import multivariate_normal +import tensorflow as tf +from brainiak.utils.brsa_gendata import rmn +from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_col, matnorm_logp_marginal_row + +from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky +import logging + +logging.basicConfig(level=logging.DEBUG) + +# X is m x n, so A sould be m x p + +m = 5 +n = 4 +p = 3 + +rtol = 1e-7 + + +def test_against_scipy_mvn_row_marginal(): + + rowcov = CovUnconstrainedCholesky(size=m) + colcov = CovIdentity(size=n) + Q = CovUnconstrainedCholesky(size=p) + + X = rmn(np.eye(m), np.eye(n)) + A = rmn(np.eye(m), np.eye(p)) + + A_tf = tf.constant(A, 'float64') + X_tf = tf.constant(X, 'float64') + + with tf.Session() as sess: + + sess.run(tf.global_variables_initializer()) + + Q_np = Q.Sigma.eval(session=sess) + + rowcov_np = rowcov.Sigma.eval(session=sess) + A.dot(Q_np).dot(A.T) + + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), + rowcov_np)) + + tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, + A_tf, Q) + assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + + +def test_against_scipy_mvn_col_marginal(): + + rowcov = CovIdentity(size=m) + colcov = CovUnconstrainedCholesky(size=n) + Q = CovUnconstrainedCholesky(size=p) + + X = rmn(np.eye(m), np.eye(n)) + A = rmn(np.eye(p), np.eye(n)) + + A_tf = tf.constant(A, 'float64') + X_tf = 
tf.constant(X, 'float64') + + with tf.Session() as sess: + + sess.run(tf.global_variables_initializer()) + + Q_np = Q.Sigma.eval(session=sess) + + colcov_np = colcov.Sigma.eval(session=sess) + A.T.dot(Q_np).dot(A) + + scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), + colcov_np)) + + tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, + A_tf, Q) + assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py new file mode 100644 index 000000000..fd94ba34d --- /dev/null +++ b/tests/matnormal/test_matnormal_regression.py @@ -0,0 +1,105 @@ +import numpy as np +from scipy.stats import norm, wishart, pearsonr +from brainiak.matnormal.covs import\ + CovIdentity, CovUnconstrainedCholesky, CovUnconstrainedInvCholesky, CovDiagonal +from brainiak.matnormal import MatnormRegression +from brainiak.utils.brsa_gendata import rmn +import pytest +import logging + +logging.basicConfig(level=logging.DEBUG) + +m = 100 +n = 4 +p = 5 + +corrtol = 0.8 # at least this much correlation between true and est to pass + + +def test_matnorm_regression_unconstrained(): + + # Y = XB + eps + # Y is m x n, B is n x p, eps is m x p + X = norm.rvs(size=(m, n)) + B = norm.rvs(size=(n, p)) + Y_hat = X.dot(B) + rowcov_true = np.eye(m) + colcov_true = wishart.rvs(p+2, np.eye(p)) + + Y = Y_hat + rmn(rowcov_true, colcov_true) + + row_cov = CovIdentity(size=m) + col_cov = CovUnconstrainedCholesky(size=p) + + model = MatnormRegression(time_cov=row_cov, space_cov=col_cov) + + model.fit(X, Y) + + assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + + +def test_matnorm_regression_unconstrainedprec(): + + # Y = XB + eps + # Y is m x n, B is n x p, eps is m x p + X = norm.rvs(size=(m, n)) + B = norm.rvs(size=(n, p)) + Y_hat = X.dot(B) + rowcov_true = np.eye(m) + colcov_true = wishart.rvs(p+2, np.eye(p)) + + Y = Y_hat + rmn(rowcov_true, colcov_true) + + row_cov = CovIdentity(size=m) + col_cov = CovUnconstrainedInvCholesky(size=p) + + model = MatnormRegression(time_cov=row_cov, space_cov=col_cov) + + model.fit(X, Y) + + assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + +def test_matnorm_regression_optimizerChoice(): + + # Y = XB + eps + # Y is m x n, B is n x p, eps is m x p + X = norm.rvs(size=(m, n)) + B = norm.rvs(size=(n, p)) + Y_hat = X.dot(B) + rowcov_true = np.eye(m) + colcov_true = wishart.rvs(p+2, np.eye(p)) + + Y = Y_hat + rmn(rowcov_true, colcov_true) + + row_cov = CovIdentity(size=m) + col_cov = CovUnconstrainedInvCholesky(size=p) + + model = MatnormRegression(time_cov=row_cov, space_cov=col_cov, optimizer="CG") + + model.fit(X, Y) + + assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + +def test_matnorm_regression_scaledDiag(): + + # Y = XB + eps + # Y is m x n, B is n x p, eps is m x p + X = norm.rvs(size=(m, n)) + B = norm.rvs(size=(n, p)) + Y_hat = X.dot(B) + + rowcov_true = np.eye(m) + colcov_true = np.diag(np.abs(norm.rvs(size=p))) + + Y = Y_hat + rmn(rowcov_true, colcov_true) + + row_cov = CovIdentity(size=m) + col_cov = CovDiagonal(size=p) + + model = MatnormRegression(time_cov=row_cov, space_cov=col_cov) + + model.fit(X, Y) + + assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + + diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py new file mode 100644 index 000000000..881eeb3d1 --- /dev/null +++ b/tests/matnormal/test_matnormal_rsa.py @@ -0,0 +1,81 @@ +from 
brainiak.matnormal import MNRSA +from brainiak.utils.utils import cov2corr +from brainiak.matnormal.covs import CovIdentity, CovDiagonal +from scipy.stats import norm +from numpy.linalg import cholesky +import numpy as np +import logging + +logging.basicConfig(level=logging.DEBUG) + + +def gen_U_nips2016_example(): + + n_C = 16 + U = np.zeros([n_C, n_C]) + U = np.eye(n_C) * 0.6 + U[8:12, 8:12] = 0.8 + for cond in range(8, 12): + U[cond,cond] = 1 + + return U + + +def rmn(rowcov, colcov): + # generate random draws from a zero-mean matrix-normal distribution + Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))) + return(cholesky(rowcov).dot(Z).dot(cholesky(colcov))) + + +def gen_brsa_data_matnorm_model(U, n_T, n_V, space_cov, time_cov, n_nureg): + + n_C = U.shape[0] + + beta = rmn(U, space_cov) + + X = rmn(np.eye(n_T), np.eye(n_C)) + + beta_0 = rmn(np.eye(n_nureg), space_cov) + + X_0 = rmn(np.eye(n_T), np.eye(n_nureg)) + + Y_hat = X.dot(beta) + X_0.dot(beta_0) + + Y = Y_hat + rmn(time_cov, space_cov) + + sizes = {"n_C": n_C, "n_T": n_T, "n_V": n_V} + + train = {"beta": beta, "X": X, "Y": Y, "U": U, 'X_0': X_0} + + return train, sizes + + +def test_brsa_rudimentary(): + """this test is super loose""" + + # this is Mingbo's synth example from the paper + U = gen_U_nips2016_example() + + n_T = 150 + n_V = 250 + n_nureg = 5 + + spacecov_true = np.eye(n_V) + + timecov_true = np.diag(np.abs(norm.rvs(size=(n_T)))) + + tr, sz = gen_brsa_data_matnorm_model(U, n_T=n_T, n_V=n_V, n_nureg=n_nureg, + space_cov=spacecov_true, + time_cov=timecov_true) + + spacecov_model = CovIdentity(size=n_V) + timecov_model = CovDiagonal(size=n_T) + + model_matnorm = MNRSA(time_cov=timecov_model, + space_cov=spacecov_model) + + model_matnorm.fit(tr['Y'], tr['X']) + + RMSE = np.mean((model_matnorm.C_ - cov2corr(tr['U']))**2)**0.5 + + assert(RMSE < 0.1) From 94dfcbbd24c1c77c99c52442e8075365dc0060f6 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 5 Mar 2018 09:57:44 -0500 Subject: [PATCH 02/84] add tensorflow to requirements --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a0d883ec9..eb68228bb 100644 --- a/setup.py +++ b/setup.py @@ -132,7 +132,8 @@ def finalize_options(self): 'pybind11>=1.7', 'psutil', 'nibabel', - 'typing' + 'typing', + 'tensorflow' ], author='Princeton Neuroscience Institute and Intel Corporation', author_email='mihai.capota@intel.com', From 7264b3fd8a678eb3a2d31c06bbe68f0a1ffc2e90 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 12 Mar 2018 15:06:08 -0400 Subject: [PATCH 03/84] make the linter happy --- brainiak/matnormal/__init__.py | 171 ++++++++++++++------ brainiak/matnormal/covs.py | 62 ++++--- brainiak/matnormal/matnormal_likelihoods.py | 18 ++- brainiak/matnormal/mnrsa.py | 33 ++-- brainiak/matnormal/regression.py | 18 ++- 5 files changed, 202 insertions(+), 100 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index b095ed601..3645af1d9 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -1,7 +1,8 @@ from .mnrsa import MNRSA from .regression import MatnormRegression -"""The matrix variate normal distribution, with conditional and marginal identities +"""The matrix variate normal distribution, + with conditional and marginal identities ========================================================================================== .. 
math:: @@ -33,23 +34,26 @@ The matrix-variate normal distribution is a generalization to matrices of the -normal distribution. Another name for it is the multivariate normal distribution -with kronecker separable covariance. The distributional intuition is as follows -if :math:`X \\sim \\mathcal{MN}(M,R,C)` then +normal distribution. Another name for it is the multivariate normal +distribution with kronecker separable covariance. +The distributional intuition is as follows: if +:math:`X \\sim \\mathcal{MN}(M,R,C)` then :math:`\\mathrm{vec}(X)\\sim\\mathcal{N}(\\mathrm{vec}(M), C \\otimes R)`, where :math:`\\mathrm{vec}(\\cdot)` is the vectorization operator and :math:`otimes` is the Kronecker product. If we think of X as a matrix of TRs by -voxels in the fMRI setting, then this model assumes that each voxel has the same -TR-by-TR covariance structure (represented by the matrix R), and each volume has -the same spatial covariance (represented by the matrix C). This assumption -allows us to model both covariances separately. We can assume that the spatial -covariance itself is kronecker-structured, which implies that the spatial -covariance of voxels is the same in the X, Y and Z dimensions. +voxels in the fMRI setting, then this model assumes that each voxel has the +same TR-by-TR covariance structure (represented by the matrix R), +and each volume has the same spatial covariance (represented by the matrix C). +This assumption allows us to model both covariances separately. +We can assume that the spatial covariance itself is kronecker-structured, +which implies that the spatial covariance of voxels is the same in the X, +Y and Z dimensions. The log-likelihood for the matrix-normal density is: .. math:: - \\log p(X\\mid \\M,\\R, \\C) = -2\\log mn - m \\log|\\C| - n \\log|\\R| - \\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right] + \\log p(X\\mid \\M,\\R, \\C) = -2\\log mn - m \\log|\\C| - n \\log|\\R| - + \\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right] Here :math:`X` and :math:`M` are both :math:`m\\times n` matrices, :math:`\\R` @@ -75,28 +79,47 @@ Uppercase subscripts for covariances help keep track where they come from. .. math:: - \\mathbf{X}_{ij} \\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{\\mathbf{X}i},\\Sigma_{\\mathbf{X}j})\\\\ - \\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{B}_{jk}, \\Sigma_{\\mathbf{Y}j},\\Sigma_{\\mathbf{Y}k})\\\\ - \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{Y}_{jk} + \\mathbf{C}_{ik}, \\Sigma_{\\mathbf{Z}_i}, \\Sigma_{\\mathbf{Z}_k})\\\\ + \\mathbf{X}_{ij} \\sim \\mathcal{MN}(\\mathbf{A}_{ij}, + \\Sigma_{\\mathbf{X}i},\\Sigma_{\\mathbf{X}j})\\\\ + \\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{B}_{jk}, + \\Sigma_{\\mathbf{Y}j},\\Sigma_{\\mathbf{Y}k})\\\\ + \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim + \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{Y}_{jk} + \\mathbf{C}_{ik}, + \\Sigma_{\\mathbf{Z}_i}, \\Sigma_{\\mathbf{Z}_k})\\\\ -We vectorize, and covert to a form we recognize as $y \\sim \\mathcal{N}(Mx+b, \\Sigma)$. +We vectorize, and covert to a form we recognize as +$y \\sim \\mathcal{N}(Mx+b, \\Sigma)$. .. 
math:: - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{Y}_{jk}+\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\ - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{Y}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\ + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim + \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{Y}_{jk}+\\mathbf{C}_{ik}), + \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\ + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} + \\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{Y}_{jk}) + + \\vecop(\\mathbf{C}_{ik}), + \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i}) Now we can use our standard gaussian marginalization identity: .. math:: - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i} + (\\I_k\\otimes\\X_{ij})(\\Sigma_{\\mathbf{Y}_k}\\otimes\\Sigma_{\\mathbf{Y}_j})(\\I_k\\otimes\\X_{ij})\\trp )\\\\ + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim + \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{B}_{jk}) + + \\vecop(\\mathbf{C}_{ik}), + \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i} + + (\\I_k\\otimes\\X_{ij})(\\Sigma_{\\mathbf{Y}_k}\\otimes + \\Sigma_{\\mathbf{Y}_j})(\\I_k\\otimes\\X_{ij})\\trp ) Collect terms using the mixed-product property of kronecker products: .. math:: - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{\\mathbf{Y}_k}\\otimes \\X_{ij}\\Sigma_{\\mathbf{Y}_j}\\X_{ij}\\trp) + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim + \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{B}_{jk}) + + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes + \\Sigma_{\\mathbf{Z}_i} + \\Sigma_{\\mathbf{Y}_k}\\otimes + \\X_{ij}\\Sigma_{\\mathbf{Y}_j}\\X_{ij}\\trp) Now, we can see that the marginal density is a matrix-variate normal only if @@ -108,20 +131,38 @@ and transform it back into a matrix normal: .. 
math:: - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{_k}\\otimes \\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp)\\\\ - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{k}\\otimes(\\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp))\\\\ - \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij} \\sim \\mathcal{MN}(\\X\\mathbf{B}_{jk} + \\mathbf{C}_{ik}, \\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp,\\Sigma_{k}) + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim + \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), + \\Sigma_{k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{_k}\\otimes + \\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp)\\\\ + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim + \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), + \\Sigma_{k}\\otimes(\\Sigma_{\\mathbf{Z}_i} + +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp))\\\\ + \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij} \\sim + \\mathcal{MN}(\\X\\mathbf{B}_{jk} + \\mathbf{C}_{ik}, + \\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp,\\Sigma_{k}) We can do it in the other direction as well, because if -:math:`\\X \\sim \\mathcal{MN}(M, U, V)` then :math:`\\X\\trp \\sim \\mathcal{MN}(M\\trp, V, U)`: +:math:`\\X \\sim \\mathcal{MN}(M, U, V)` then :math:`\\X\\trp \\sim + \\mathcal{MN}(M\\trp, V, U)`: .. math:: - \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{Y}_{jk}\\trp\\mathbf{X}_{ij}\\trp + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k},\\Sigma_{\\mathbf{Z}_i})\\\\ - \\mbox{let } \\Sigma_i := \\Sigma_{\\mathbf{Z}_i}=\\Sigma_{\\mathbf{X}_i} \\\\ + \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim + \\mathcal{MN}(\\mathbf{Y}_{jk}\\trp\\mathbf{X}_{ij}\\trp + + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k},\\Sigma_{\\mathbf{Z}_i})\\\\ + \\mbox{let } \\Sigma_i := + \\Sigma_{\\mathbf{Z}_i}=\\Sigma_{\\mathbf{X}_i} \\\\ \\cdots\\\\ - \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{A}_{jk}\\trp\\mathbf{X}_{ij}\\trp + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k} + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y,\\Sigma_{\\mathbf{Z}_i})\\\\ - \\mathbf{Z}_{ik}\\mid\\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{A}_{jk}+ \\mathbf{C}_{ik},\\Sigma_{\\mathbf{Z}_i},\\Sigma_{\\mathbf{Z}_k} + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y) + \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{Y}_{jk} \\sim + \\mathcal{MN}(\\mathbf{A}_{jk}\\trp\\mathbf{X}_{ij}\\trp + + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k} + + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y,\\Sigma_{\\mathbf{Z}_i})\\\\ + \\mathbf{Z}_{ik}\\mid\\mathbf{Y}_{jk} \\sim + \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{A}_{jk}+ + \\mathbf{C}_{ik},\\Sigma_{\\mathbf{Z}_i},\\Sigma_{\\mathbf{Z}_k} + + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y) These marginal likelihoods are implemented relatively efficiently in `MatnormModelBase.matnorm_logp_marginal_row` and @@ -130,59 +171,87 @@ Partitioned matrix normal conditionals -------------------------------------------------- -Here we extend the multivariate gaussian conditional identity to matrix normals. -This is used for prediction in some models. Below, we use lowercase subscripts -for sizes to make dimensionalities easier to track. Uppercase subscripts for -covariances help keep track where they come from. 
+Here we extend the multivariate gaussian conditional identity to matrix +normals. This is used for prediction in some models. Below, we +use lowercase subscripts for sizes to make dimensionalities easier to track. +Uppercase subscripts for covariances help keep track where they come from. -Next, we do the same for the partitioned gaussian identity. First two vectorized -matrix-normals that form our partition: +Next, we do the same for the partitioned gaussian identity. First two +vectorized matrix-normals that form our partition: .. math:: - \\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{i}, \\Sigma_{j}) \\rightarrow \\vecop[\\mathbf{X}_{ij}] \\sim \\mathcal{N}(\\vecop[\\mathbf{A}_{ij}], \\Sigma_{j}\\otimes \\Sigma_{i})\\\\ - \\mathbf{Y}_{ik} &\\sim \\mathcal{MN}(\\mathbf{B}_{ik}, \\Sigma_{i}, \\Sigma_{k}) \\rightarrow \\vecop[\\mathbf{Y}_{ik}] \\sim \\mathcal{N}(\\vecop[\\mathbf{B}_{ik}], \\Sigma_{k}\\otimes \\Sigma_{i})\\\\ + \\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{i}, + \\Sigma_{j}) \\rightarrow \\vecop[\\mathbf{X}_{ij}] \\sim + \\mathcal{N}(\\vecop[\\mathbf{A}_{ij}], \\Sigma_{j}\\otimes\\Sigma_{i})\\\\ + \\mathbf{Y}_{ik} &\\sim \\mathcal{MN}(\\mathbf{B}_{ik}, \\Sigma_{i}, + \\Sigma_{k}) \\rightarrow \\vecop[\\mathbf{Y}_{ik}] \\sim + \\mathcal{N}(\\vecop[\\mathbf{B}_{ik}], \\Sigma_{k}\\otimes\\Sigma_{i})\\\\ \\begin{bmatrix}\\vecop[\\mathbf{X}_{ij}] \\\\ \\vecop[\\mathbf{Y}_{ik}] \\end{bmatrix} - & \\sim \\mathcal{N}\\left(\\vecop\\begin{bmatrix}\\mathbf{A}_{ij} \\\\ \\mathbf{B}_{ik} + & \\sim \\mathcal{N}\\left(\\vecop\\begin{bmatrix}\\mathbf{A}_{ij} + \\\\ \\mathbf{B}_{ik} \\end{bmatrix} - , \\begin{bmatrix} \\Sigma_{j}\\otimes \\Sigma_i & \\Sigma_{jk} \\otimes \\Sigma_i \\\\ - \\Sigma_{kj}\\otimes \\Sigma_i & \\Sigma_{k} \\otimes \\Sigma_i\\end{bmatrix}\\right) + , \\begin{bmatrix} \\Sigma_{j}\\otimes \\Sigma_i & + \\Sigma_{jk} \\otimes \\Sigma_i \\\\ + \\Sigma_{kj}\\otimes \\Sigma_i & \\Sigma_{k} \\otimes + \\Sigma_i\\end{bmatrix}\\right) We apply the standard partitioned Gaussian identity and simplify using the properties of the :math:`\\vecop` operator and the mixed product property of kronecker products: .. 
math:: - \\vecop[\\X_{ij}] \\mid \\vecop[\\Y_{ik}]\\sim\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\otimes\\Sigma_i)(\\Sigma_k\\inv\\otimes\\Sigma_i\\inv)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]),\\\\ - & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\otimes\\Sigma_i)(\\Sigma_k\\inv\\otimes\\Sigma_i\\inv) (\\Sigma_{kj}\\otimes\\Sigma_i))\\\\ - =\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\Sigma_i\\Sigma_i\\inv)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\ - & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes\\Sigma_i\\Sigma_i\\inv \\Sigma_i))\\\\ - =\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\I)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\ - & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes\\Sigma_i)\\\\ - =\\mathcal{N}(&\\vecop[\\A_{ij}] + \\vecop[\\Y_{ik}-\\B_{ik}\\Sigma_k\\inv\\Sigma_{kj}], (\\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj})\\otimes\\Sigma_i) + \\vecop[\\X_{ij}] \\mid \\vecop[\\Y_{ik}]\\sim + \\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\otimes\\Sigma_i) + (\\Sigma_k\\inv\\otimes\\Sigma_i\\inv)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]),\\\\ + & \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\otimes\\Sigma_i) + (\\Sigma_k\\inv\\otimes\\Sigma_i\\inv) (\\Sigma_{kj}\\otimes\\Sigma_i))\\\\ + =\\mathcal{N}(&\\vecop[\\A_{ij}] + + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\Sigma_i\\Sigma_i\\inv) + (\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\ + & \\Sigma_j\\otimes\\Sigma_i - + (\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes + \\Sigma_i\\Sigma_i\\inv \\Sigma_i))\\\\ + =\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\I) + (\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\ + & \\Sigma_j\\otimes\\Sigma_i - + (\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes\\Sigma_i)\\\\ + =\\mathcal{N}(&\\vecop[\\A_{ij}] + + \\vecop[\\Y_{ik}-\\B_{ik}\\Sigma_k\\inv\\Sigma_{kj}], + (\\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj})\\otimes\\Sigma_i) Next, we recognize that this multivariate gaussian is equivalent to the following matrix variate gaussian: .. math:: - \\X_{ij} \\mid \\Y_{ik}\\sim \\mathcal{MN}(&\\A_{ij} +(\\Y_{ik}-\\B_{ik})\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i, \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}) + \\X_{ij} \\mid \\Y_{ik}\\sim \\mathcal{MN}(&\\A_{ij} + + (\\Y_{ik}-\\B_{ik})\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i, + \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}) The conditional in the other direction can be written by working through the same algebra: .. math:: - \\Y_{ik} \\mid \\X_{ij}\\sim \\mathcal{MN}(&\\B_{ik} +(\\X_{ij}-\\A_{ij})\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i, \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}) + \\Y_{ik} \\mid \\X_{ij}\\sim \\mathcal{MN}(&\\B_{ik} +(\\X_{ij}- + \\A_{ij})\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i, + \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}) Finally, vertical rather than horizontal concatenation (yielding a partitioned row rather than column covariance) can be written by recognizing the behavior of the matrix normal under transposition: .. 
math:: - \\X\\trp_{ji} \\mid \\Y\\trp_{ki}\\sim \\mathcal{MN}(&\\A\\trp_{ji} +\\Sigma_{jk}\\Sigma_k\\inv(\\Y\\trp_{ki}-\\B\\trp_{ki}), \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i)\\\\ - \\Y\\trp_{ki} \\mid \\X\\trp_{ji}\\sim \\mathcal{MN}(&\\B\\trp_{ki} +\\Sigma_{kj}\\Sigma_j\\inv(\\X\\trp_{ji}-\\A\\trp_{ji}), \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i) - -These conditional likelihoods are implemented relatively efficiently in `MatnormModelBase.matnorm_logp_conditional_row` and `MatnormModelBase.matnorm_logp_conditional_col`. + \\X\\trp_{ji} \\mid \\Y\\trp_{ki}\\sim \\mathcal{MN}(&\\A\\trp_{ji} + + \\Sigma_{jk}\\Sigma_k\\inv(\\Y\\trp_{ki}-\\B\\trp_{ki}), + \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i)\\\\ + \\Y\\trp_{ki} \\mid \\X\\trp_{ji}\\sim \\mathcal{MN}(&\\B\\trp_{ki} + + \\Sigma_{kj}\\Sigma_j\\inv(\\X\\trp_{ji}-\\A\\trp_{ji}), + \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i) + +These conditional likelihoods are implemented relatively efficiently +in `MatnormModelBase.matnorm_logp_conditional_row` and + `MatnormModelBase.matnorm_logp_conditional_col`. """ diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 4990c9136..6c3c6a091 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -131,21 +131,29 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): if scan_onsets is None: self.run_sizes = [size] self.offdiag_template = tf.constant(scipy.linalg.toeplitz(np.r_[0, - 1, np.zeros(size-2)]), dtype=tf.float64) - self.diag_template = tf.constant(np.diag(np.r_[0, np.ones(size-2), 0])) + 1, np.zeros(size-2)]), + dtype=tf.float64) + self.diag_template = tf.constant(np.diag(np.r_[0, + np.ones(size-2), + 0])) else: self.run_sizes = np.ediff1d(np.r_[scan_onsets, size]) sub_offdiags = [scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(r-2)]) for r in self.run_sizes] - self.offdiag_template = tf.constant(scipy.sparse.block_diag(sub_offdiags).toarray()) - subdiags = [np.diag(np.r_[0, np.ones(r-2), 0]) for r in self.run_sizes] - self.diag_template = tf.constant(scipy.sparse.block_diag(subdiags).toarray()) + self.offdiag_template = tf.constant(scipy.sparse. + block_diag(sub_offdiags) + .toarray()) + subdiags = [np.diag(np.r_[0, np.ones(r-2), 0]) + for r in self.run_sizes] + self.diag_template = tf.constant(scipy.sparse. 
+ block_diag(subdiags) + .toarray()) self.I = tf.constant(np.eye(size)) if sigma is None: - self.log_sigma = tf.Variable(tf.random_normal([1], dtype=tf.float64), - name="sigma") + self.log_sigma = tf.Variable(tf.random_normal([1], + dtype=tf.float64), name="sigma") else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") @@ -170,7 +178,8 @@ def logdet(self): rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) - return tf.reduce_sum(2 * tf.constant(self.run_sizes, dtype=tf.float64) * + return tf.reduce_sum(2 * tf.constant(self.run_sizes, + dtype=tf.float64) * tf.log(sigma) - tf.log(1 - tf.square(rho))) def Sigma_inv_x(self, X): @@ -199,8 +208,8 @@ class CovIsotropic(CovBase): def __init__(self, size, sigma=None): super(CovIsotropic, self).__init__(size) if sigma is None: - self.log_sigma = tf.Variable(tf.random_normal([1], dtype=tf.float64), - name="sigma") + self.log_sigma = tf.Variable(tf.random_normal([1], + dtype=tf.float64), name="sigma") else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") @@ -238,8 +247,8 @@ class CovDiagonal(CovBase): def __init__(self, size, sigma=None): super(CovDiagonal, self).__init__(size) if sigma is None: - self.logprec = tf.Variable(tf.random_normal([size], dtype=tf.float64), - name="precisions") + self.logprec = tf.Variable(tf.random_normal([size], + dtype=tf.float64), name="precisions") else: self.logprec = tf.Variable(np.log(1/sigma), name="log-precisions") @@ -281,7 +290,8 @@ class CovDiagonalGammaPrior(CovDiagonal): def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): super(CovDiagonalGammaPrior, self).__init__(size, sigma) - self.ig = InverseGamma(concentration=tf.constant(alpha, dtype=tf.float64), + self.ig = InverseGamma(concentration=tf.constant(alpha, + dtype=tf.float64), rate=tf.constant(beta, dtype=tf.float64)) @define_scope @@ -301,7 +311,8 @@ def __init__(self, size, Sigma=None): name="L_full", dtype="float64") else: # in order to respect the Sigma we got passed in, we log the diag - # which we will later exp. a little ugly but this is a rare use case + # which we will later exp. a little ugly but this + # is a rare use case L = np.linalg.cholesky(Sigma) L[np.diag_indices_from(L)] = np.log(np.diag(L)) self.L_full = tf.Variable(L, name="L_full", @@ -348,14 +359,16 @@ def Sigma_inv_x(self, X): class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): - """Unconstrained noise covariance parameterized in terms of its cholesky factor. + """Unconstrained noise covariance parameterized in terms of its + cholesky factor. Regularized using the trick from Chung et al. 2015 such that as the covariance approaches singularity, the likelihood goes to 0. 
""" def __init__(self, size, Sigma=None): super(CovUnconstrainedCholeskyWishartReg, self).__init__(size) - self.wishartReg = WishartCholesky(df=tf.constant(size+2, dtype=tf.float64), + self.wishartReg = WishartCholesky(df=tf.constant(size+2, + dtype=tf.float64), scale=tf.constant(1e5 * np.eye(size), dtype=tf.float64)) @@ -370,7 +383,8 @@ def logp(self): class CovUnconstrainedInvCholesky(CovBase): - """Unconstrained noise covariance parameterized in terms of its precision cholesky + """Unconstrained noise covariance parameterized + in terms of its precision cholesky """ def __init__(self, size, invSigma=None): @@ -378,7 +392,8 @@ def __init__(self, size, invSigma=None): self.Linv_full = tf.Variable(tf.random_normal([size, size], dtype=tf.float64), name="Linv_full") else: - self.Linv_full = tf.Variable(np.linalg.cholesky(invSigma), name="Linv_full") + self.Linv_full = tf.Variable(np.linalg.cholesky(invSigma), + name="Linv_full") super(CovUnconstrainedInvCholesky, self).__init__(size) @@ -465,7 +480,8 @@ def __init__(self, sizes, Sigmas=None, mask=None): for i in range(self.nfactors)] else: self.L_full = [tf.Variable(np.linalg.cholesky(Sigmas[i]), - name="L"+str(i)+"_full") for i in range(self.nfactors)] + name="L"+str(i)+"_full") + for i in range(self.nfactors)] self.mask = mask @define_scope @@ -494,10 +510,11 @@ def logdet(self): """ log|Sigma| using the diagonals of the cholesky factors. """ if self.mask is None: - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in self.L]) + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) + for mat in self.L]) n_prod = tf.reduce_prod(n_list) logdet = tf.stack([tf.reduce_sum(tf.log(tf.diag_part(mat))) - for mat in self.L]) + for mat in self.L]) logdetfinal = tf.reduce_sum((logdet*n_prod)/n_list) else: n_list = [tf.shape(mat)[0] for mat in self.L] @@ -506,7 +523,8 @@ def logdet(self): for i in range(self.nfactors): indices = list(range(self.nfactors)) indices.remove(i) - logdet += tf.log(tf.diag_part(self.L[i])) * tf.to_double(tf.reduce_sum(mask_reshaped, indices)) + logdet += tf.log(tf.diag_part(self.L[i])) *\ + tf.to_double(tf.reduce_sum(mask_reshaped, indices)) logdetfinal = tf.reduce_sum(logdet) return (2.0*logdetfinal) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index c6398d8f7..b2f256dd0 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -15,17 +15,20 @@ def solve_det_marginal(x, sigma, A, Q): Use matrix inversion lemma for the solve: .. 
math:: (\Sigma + AQA')^{-1} X =\\ - \Sigma^{-1} - \Sigma^{-1} A (Q^{-1} + A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} + \Sigma^{-1} - \Sigma^{-1} A (Q^{-1} + + A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} Use matrix determinant lemma for determinant: ..math:: - \log|(\Sigma + AQA')| = \log|Q^{-1} + A' \Sigma^{-1} A| + \log|Q| + \log|\Sigma| + \log|(\Sigma + AQA')| = \log|Q^{-1} + A' \Sigma^{-1} A| + + \log|Q| + \log|\Sigma| """ # we care about condition number of i_qf if logging.getLogger().isEnabledFor(logging.DEBUG): A = tf.Print(A, [_condition(Q.Sigma_inv + tf.matmul(A, - sigma.Sigma_inv_x(A), transpose_a=True))], 'i_qf condition') + sigma.Sigma_inv_x(A), transpose_a=True))], + 'i_qf condition') # since the sigmas expose only inverse, we invert their # conditions to get what we want A = tf.Print(A, [1/_condition(Q.Sigma_inv)], 'Q condition') @@ -42,7 +45,8 @@ def solve_det_marginal(x, sigma, A, Q): if logging.getLogger().isEnabledFor(logging.DEBUG): logdet = tf.Print(logdet, [Q.logdet], 'Q logdet') logdet = tf.Print(logdet, [sigma.logdet], 'sigma logdet') - logdet = tf.Print(logdet, [2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(i_qf_cholesky)))], + logdet = tf.Print(logdet, [2 * tf.reduce_sum(tf.log( + tf.matrix_diag_part(i_qf_cholesky)))], 'iqf logdet') # A' Sigma^{-1} @@ -61,11 +65,13 @@ def solve_det_conditional(x, sigma, A, Q): Use matrix inversion lemma for the solve: .. math:: (\Sigma - AQ^{-1}A')^{-1} X =\\ - \Sigma^{-1} + \Sigma^{-1} A (Q - A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} X + \Sigma^{-1} + \Sigma^{-1} A (Q - + A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} X Use matrix determinant lemma for determinant: ..math:: - \log|(\Sigma - AQ^{-1}A')| = \log|Q - A' \Sigma^{-1} A| - \log|Q| + \log|\Sigma| + \log|(\Sigma - AQ^{-1}A')| = + \log|Q - A' \Sigma^{-1} A| - \log|Q| + \log|\Sigma| """ # (Q - A' Sigma^{-1} A) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 74fa445fd..4516930db 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -31,16 +31,18 @@ class MNRSA(BaseEstimator): 2. MNRSA does not estimate the nuisance timecourse X_0. Instead, we expect the temporal noise covariance to capture the same property - (because when marginalizing over B_0 gives a low-rank component to the noise - covariance, something we hope to have available soon. + (because when marginalizing over B_0 gives a low-rank component + to the noise covariance, something we hope to have available soon. For users: in general, if you are worried about voxels each having - different temporal noise structure,you should use `brainiak.reprsimil.BRSA`. - If you are worried about between-voxel correlations or temporal covaraince - structures that BRSA does not support, you should use MNRSA. + different temporal noise structure,you should use + `brainiak.reprsimil.BRSA`. If you are worried about between-voxel + correlations or temporal covaraince structures that BRSA does not + support, you should use MNRSA. .. 
math:: - Y \\sim \\mathcal{MN}(0, \\Sigma_t + XLL^{\\top}X^{\\top}+ X_0X_0^{\\top}, \\Sigma_s) + Y \\sim \\mathcal{MN}(0, \\Sigma_t + XLL^{\\top}X^{\\top}+ + X_0X_0^{\\top}, \\Sigma_s) U = LL^{\\top} Parameters @@ -121,7 +123,8 @@ def fit(self, X, y, structured_RSA_cov=None): L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) self.L = tf.matrix_set_diag(L_indeterminate, - tf.exp(tf.matrix_diag_part(L_indeterminate))) + tf.exp(tf.matrix_diag_part( + L_indeterminate))) self.train_variables.extend([self.L_full]) @@ -134,12 +137,16 @@ def fit(self, X, y, structured_RSA_cov=None): options=self.optCtrl) if logging.getLogger().isEnabledFor(logging.INFO): - optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, - [tf.reduce_min(optimizer._packed_loss_grad)], - 'mingrad') - optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, - [tf.reduce_max(optimizer._packed_loss_grad)], - 'maxgrad') + optimizer._packed_loss_grad = tf.Print( + optimizer._packed_loss_grad, + [tf.reduce_min( + optimizer._packed_loss_grad)], + 'mingrad') + optimizer._packed_loss_grad = tf.Print( + optimizer._packed_loss_grad, + [tf.reduce_max( + optimizer._packed_loss_grad)], + 'maxgrad') optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, [self.logp()], 'logp') diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 54d30a81c..fedd98074 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -26,8 +26,8 @@ def __init__(self, time_noise_cov, space_noise_cov, optimizer='L-BFGS-B', optCtrl=None): self.optCtrl, self.optMethod = optCtrl, optimizer - - self.time_noise_cov, self.space_noise_cov = time_noise_cov, space_noise_cov + self.time_noise_covtime_noise_cov + self.space_noise_cov = space_noise_cov self.n_t = time_noise_cov.size self.n_v = space_noise_cov.size @@ -85,12 +85,13 @@ def fit(self, X, y): # initialize to the least squares solution (basically all # we need now is the cov) - sigma_inv_x = self.time_noise_cov.Sigma_inv_x(self.X).eval(session=self.sess, - feed_dict=feed_dict) - sigma_inv_y = self.time_noise_cov.Sigma_inv_x(self.Y).eval(session=self.sess, - feed_dict=feed_dict) + sigma_inv_x = self.time_noise_cov.Sigma_inv_x(self.X)\ + .eval(session=self.sess, feed_dict=feed_dict) + sigma_inv_y = self.time_noise_cov.Sigma_inv_x(self.Y)\ + .eval(session=self.sess, feed_dict=feed_dict) - beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) + beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), + (X.T).dot(sigma_inv_y)) self.beta = tf.Variable(beta_init, name="beta") @@ -143,7 +144,8 @@ def calibrate(self, Y): # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) # (B Sigma_s^{-1} B')^{-1} - B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp).eval(session=self.sess) + B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp)\ + .eval(session=self.sess) X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T From 34087a79aa53cd392ad053eee5695189cb8be48e Mon Sep 17 00:00:00 2001 From: Narayanan Sundaram Date: Mon, 2 Apr 2018 15:21:20 -0700 Subject: [PATCH 04/84] Fix style issues --- brainiak/utils/utils.py | 93 +++++++++++++++++++++++++---------------- 1 file changed, 56 insertions(+), 37 deletions(-) diff --git a/brainiak/utils/utils.py b/brainiak/utils/utils.py index 40adc64b6..29dc805c9 100644 --- a/brainiak/utils/utils.py +++ b/brainiak/utils/utils.py @@ -833,11 +833,12 @@ def tf_solve_lower_triangular_kron(L, y): xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) 
t = xinb / L[0][i, i] xinb = tf_solve_lower_triangular_kron(L[1:], t) - xina = xina - tf.reshape(tf.tile - (tf.slice(L[0], [i+1, i], [na-i-1, 1]), - [1, nb*col]), [(na-i-1)*nb, col]) * \ - tf.reshape(tf.tile(tf.reshape - (t, [-1, 1]), [na-i-1, 1]), [(na-i-1)*nb, col]) + xina = xina - tf.reshape( + tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), [1, nb*col]), + [(na-i-1)*nb, col]) * \ + tf.reshape( + tf.tile(tf.reshape(t, [-1, 1]), [na-i-1, 1]), + [(na-i-1)*nb, col]) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x @@ -878,11 +879,13 @@ def tf_solve_upper_triangular_kron(L, y): xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) t = xinb / L[0][i, i] xinb = tf_solve_upper_triangular_kron(L[1:], t) - xt = xt - tf.reshape(tf.tile(tf.transpose - (tf.slice(L[0], [i, 0], [1, i])), - [1, nb*col]), [i*nb, col]) * \ - tf.reshape(tf.tile(tf.reshape - (t, [-1, 1]), [i, 1]), [i*nb, col]) + xt = (xt + - tf.reshape( + tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), + [1, nb*col]), + [i*nb, col]) + * tf.reshape( + tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), [i*nb, col])) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x @@ -915,11 +918,15 @@ def tf_kron_mult(L, x): n_prod = tf.to_int32(tf.reduce_prod(n_list)) nb = tf.to_int32(n_prod/na) col = tf.shape(x)[1] - xt = tf_kron_mult(L[1:], tf.transpose(tf.reshape(tf.transpose(x), [-1, nb]))) + xt = tf_kron_mult( + L[1:], + tf.transpose(tf.reshape(tf.transpose(x), [-1, nb]))) y = tf.zeros_like(x) for i in range(na): ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) - yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), tf.transpose(tf.slice(L[0], [i,0], [1, na]))), [nb, col]) + yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), + tf.transpose(tf.slice(L[0], [i, 0], [1, na]))), + [nb, col]) y = tf.concat(axis=0, values=[ya, yb, yc]) return y @@ -930,7 +937,7 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): Arguments --------- - L : 2-D tensor + L : 2-D tensor Must be a tensorflow tensor and must be a triangular matrix of dimension n x n @@ -954,14 +961,16 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): """ zero = tf.constant(0, dtype=tf.int32) - mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1,1]), tf.reshape(mask, [1, -1])), zero)) + mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), + tf.reshape(mask, [1, -1])), zero)) q = tf.to_int32(tf.sqrt(tf.to_double(tf.shape(mask_mat)[0]))) - L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q,q]) + L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) maskindex = tf.where(tf.not_equal(mask, zero)) y_masked = tf.gather_nd(y, maskindex) - x_s1 = tf.matrix_triangular_solve(L_masked, y_masked, lower=lower, adjoint=adjoint) + x_s1 = tf.matrix_triangular_solve(L_masked, y_masked, + lower=lower, adjoint=adjoint) x = tf.scatter_nd(maskindex, x_s1, tf.to_int64(tf.shape(y))) return x @@ -981,17 +990,20 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): Dimension [n_0*n_1*..n_(m-1)), p] mask: 1-D tensor - Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 for don't care + Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 + for don't care Returns ------- x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p, values at rows for which mask == 0 are set to zero + Dimension (n_0*n_1*..n_(m-1)) x p, values at rows + for which mask == 0 are set to zero """ n = len(L) if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=False) + return 
tf_masked_triangular_solve(L[0], y, mask, + lower=True, adjoint=False) else: x = y na = L[0].get_shape().as_list()[0] @@ -999,25 +1011,28 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): n_prod = tf.to_int32(tf.reduce_prod(n_list)) nb = tf.to_int32(n_prod/na) col = tf.shape(x)[1] - zero = tf.constant(0, dtype=tf.int32) for i in range(na): mask_b = tf.slice(mask, [i*nb], [nb]) xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) t = xinb / L[0][i, i] - if tf.reduce_sum(mask_b) != nb: + if tf.reduce_sum(mask_b) != nb: xinb = tf_solve_lower_triangular_masked_kron(L[1:], t, mask_b) t_masked = tf_kron_mult(L[1:], xinb) - else: #all valid - same as no mask + else: + # all valid - same as no mask xinb = tf_solve_lower_triangular_kron(L[1:], t) t_masked = t - xina = xina - tf.reshape(tf.tile - (tf.slice(L[0], [i+1, i], [na-i-1, 1]), - [1, nb*col]), [(na-i-1)*nb, col]) * \ - tf.reshape(tf.tile(tf.reshape - (t_masked, [-1, 1]), [na-i-1, 1]), [(na-i-1)*nb, col]) + xina = (xina + - tf.reshape( + tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), + [1, nb*col]), + [(na-i-1)*nb, col]) + * tf.reshape( + tf.tile(tf.reshape(t_masked, [-1, 1]), [na-i-1, 1]), + [(na-i-1)*nb, col])) x = tf.concat(axis=0, values=[xt, xinb, xina]) @@ -1039,17 +1054,20 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): Dimension [n_0*n_1*..n_(m-1)), p] mask: 1-D tensor - Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 for don't care + Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows + and 0 for don't care Returns ------- x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p, values at rows for which mask == 0 are set to zero + Dimension (n_0*n_1*..n_(m-1)) x p, values at rows + for which mask == 0 are set to zero """ n = len(L) if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=True) + return tf_masked_triangular_solve(L[0], y, mask, + lower=True, adjoint=True) else: x = y na = L[0].get_shape().as_list()[0] @@ -1057,7 +1075,6 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): n_prod = tf.to_int32(tf.reduce_prod(n_list)) nb = tf.to_int32(n_prod/na) col = tf.shape(x)[1] - zero = tf.constant(0, dtype=tf.int32) L1_end_tr = [tf.transpose(x) for x in L[1:]] for i in range(na-1, -1, -1): @@ -1072,12 +1089,14 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): xinb = tf_solve_upper_triangular_kron(L[1:], t) t_masked = t - xt = xt - tf.reshape(tf.tile(tf.transpose - (tf.slice(L[0], [i, 0], [1, i])), - [1, nb*col]), [i*nb, col]) * \ - tf.reshape(tf.tile(tf.reshape - (t_masked, [-1, 1]), [i, 1]), [i*nb, col]) + xt = (xt + - tf.reshape( + tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), + [1, nb*col]), + [i*nb, col]) + * tf.reshape( + tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), + [i*nb, col])) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x - From a75d86653cf5834f6b5c1d8e9bbafad1f1ea0dac Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 2 Apr 2018 20:20:31 -0400 Subject: [PATCH 05/84] more linter fixes --- brainiak/matnormal/__init__.py | 3 --- brainiak/matnormal/mnrsa.py | 2 ++ brainiak/matnormal/regression.py | 5 ++++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index 3645af1d9..fe7b8a235 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -1,6 +1,3 @@ -from .mnrsa import MNRSA -from .regression import MatnormRegression - """The matrix variate normal distribution, with conditional and marginal identities 
========================================================================================== diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 4516930db..e5af214a3 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -8,6 +8,8 @@ from tensorflow.contrib.opt import ScipyOptimizerInterface import logging +__all__ = ['MNRSA'] + class MNRSA(BaseEstimator): """ Matrix normal version of RSA. diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index fedd98074..c2d19fa27 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -4,6 +4,8 @@ from brainiak.matnormal.matnormal_likelihoods import matnorm_logp from tensorflow.contrib.opt import ScipyOptimizerInterface +__all__ = ['MatnormRegression'] + class MatnormRegression(BaseEstimator): """ This analysis allows maximum likelihood estimation of regression models @@ -140,7 +142,8 @@ def calibrate(self, Y): cannot decode.") # Sigma_s^{-1} B' - Sigma_s_btrp = self.space_noise_cov.Sigma_inv_x(tf.transpose(self.beta)) + Sigma_s_btrp = self.space_noise_cov.Sigma_inv_x(tf.transpose( + self.beta)) # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) # (B Sigma_s^{-1} B')^{-1} From 3dcf915ad942b843b3abf6787cf473f8a094d4a6 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 2 Apr 2018 20:24:24 -0400 Subject: [PATCH 06/84] fix ambiguous varname --- brainiak/matnormal/covs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 6c3c6a091..223ed70b8 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -149,7 +149,7 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): block_diag(subdiags) .toarray()) - self.I = tf.constant(np.eye(size)) + self._identity_mat = tf.constant(np.eye(size)) if sigma is None: self.log_sigma = tf.Variable(tf.random_normal([1], @@ -197,7 +197,7 @@ def Sigma_inv(self): """ rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) - return (self.I - rho * self.offdiag_template + rho**2 * + return (self._identity_mat - rho * self.offdiag_template + rho**2 * self.diag_template) / tf.square(sigma) From e9da3331e1497bd4fdfb97065c97fe0125ff53bb Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Sun, 8 Apr 2018 06:35:41 +0100 Subject: [PATCH 07/84] linter fixes in tests --- tests/matnormal/test_cov.py | 92 +++++++++++++------ tests/matnormal/test_matnormal_logp.py | 2 +- .../test_matnormal_logp_conditional.py | 3 +- .../matnormal/test_matnormal_logp_marginal.py | 7 +- tests/matnormal/test_matnormal_regression.py | 14 +-- tests/matnormal/test_matnormal_rsa.py | 2 +- 6 files changed, 79 insertions(+), 41 deletions(-) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 0ba0cab1e..342caed1a 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -1,7 +1,15 @@ import numpy as np from numpy.testing import assert_allclose from scipy.stats import norm, wishart, invgamma -from brainiak.matnormal.covs import * +from brainiak.matnormal.covs import (CovIdentity, + CovAR1, + CovIsotropic, + CovDiagonal, + CovDiagonalGammaPrior, + CovUnconstrainedCholesky, + CovUnconstrainedCholeskyWishartReg, + CovUnconstrainedInvCholesky, + CovKroneckerFactored) import tensorflow as tf import pytest import logging @@ -17,6 +25,7 @@ rtol = 1e-7 atol = 1e-7 + def logdet_sinv_np(X, sigma): # logdet _, logdet_np = np.linalg.slogdet(sigma) @@ -34,7 +43,7 @@ 
def logdet_sinv_np_mask(X, sigma, mask): # sigma-inv sinv_np_ = np.linalg.inv(sigma[np.ix_(mask_indices, mask_indices)]) # sigma-inverse * - sinvx_np_ = sinv_np_.dot(X[mask_indices,:]) + sinvx_np_ = sinv_np_.dot(X[mask_indices, :]) sinv_np = np.zeros_like(sigma) sinv_np[np.ix_(mask_indices, mask_indices)] = sinv_np_ @@ -62,7 +71,8 @@ def test_CovConstant(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) def test_CovIdentity(): @@ -77,7 +87,8 @@ def test_CovIdentity(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) def test_CovIsotropic(): @@ -92,7 +103,8 @@ def test_CovIsotropic(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) def test_CovDiagonal(): @@ -107,7 +119,9 @@ def test_CovDiagonal(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) + def test_CovDiagonal_initialized(): @@ -121,13 +135,15 @@ def test_CovDiagonal_initialized(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) def test_CovDiagonalGammaPrior(): cov_np = np.diag(np.exp(np.random.normal(size=m))) - cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, beta=1e-10) + cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, + beta=1e-10) ig = invgamma(1.5, scale=1e-10) @@ -139,7 +155,8 @@ def test_CovDiagonalGammaPrior(): penalty_np = np.sum(ig.logpdf(1/np.diag(cov_np))) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) assert_allclose(penalty_np, cov.logp.eval(session=sess), rtol=rtol) @@ -155,7 +172,8 @@ def test_CovUnconstrainedCholesky(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) 
+ assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) def test_CovUnconstrainedCholeskyWishartReg(): @@ -170,11 +188,13 @@ def test_CovUnconstrainedCholeskyWishartReg(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) # now compute the regularizer reg = wishart.logpdf(cov_np, df=m+2, scale=1e10 * np.eye(m)) assert_allclose(reg, cov.logp.eval(session=sess), rtol=rtol) + def test_CovUnconstrainedInvCholesky(): cov = CovUnconstrainedInvCholesky(size=m) @@ -187,10 +207,12 @@ def test_CovUnconstrainedInvCholesky(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) + def test_Cov2FactorKron(): - assert(m%2 == 0) + assert(m % 2 == 0) dim1 = int(m/2) dim2 = 2 @@ -206,15 +228,18 @@ def test_Cov2FactorKron(): # compute the naive version L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) - cov_np = np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())) + cov_np = np.kron(np.dot(L1, L1.transpose()), + np.dot(L2, L2.transpose())) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) + def test_Cov3FactorKron(): - assert(m%4 == 0) + assert(m % 4 == 0) dim1 = int(m/4) dim2 = 2 dim3 = 2 @@ -227,16 +252,19 @@ def test_Cov3FactorKron(): L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) L3 = (cov.L[2]).eval(session=sess) - cov_np = np.kron(np.kron(np.dot(L1, L1.transpose()),\ - np.dot(L2, L2.transpose())), np.dot(L3, L3.transpose())) + cov_np = np.kron(np.kron(np.dot(L1, L1.transpose()), + np.dot(L2, L2.transpose())), + np.dot(L3, L3.transpose())) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) + def test_Cov3FactorMaskedKron(): - assert(m%4 == 0) + assert(m % 4 == 0) dim1 = int(m/4) dim2 = 2 dim3 = 2 @@ -244,7 +272,7 @@ def test_Cov3FactorMaskedKron(): mask = np.random.binomial(1, 0.5, m).astype(np.int32) if sum(mask == 0): - mask[0] = 1 + mask[0] = 1 mask_indices = np.nonzero(mask)[0] cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3], mask=mask) @@ -256,13 +284,18 @@ def test_Cov3FactorMaskedKron(): L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) L3 = (cov.L[2]).eval(session=sess) - cov_np_factor = np.kron(L1, np.kron(L2, L3))[np.ix_(mask_indices, mask_indices)] + cov_np_factor = np.kron(L1, np.kron(L2, L3))[np.ix_(mask_indices, + mask_indices)] cov_np = np.dot(cov_np_factor, 
cov_np_factor.transpose()) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices,:], cov_np) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices, :], + cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol, atol=atol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess)[np.ix_(mask_indices, mask_indices)], rtol=rtol, atol=atol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess)[mask_indices,:], rtol=rtol, atol=atol) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol, + atol=atol) + assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess)[ + np.ix_(mask_indices, mask_indices)], rtol=rtol, atol=atol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess)[ + mask_indices, :], rtol=rtol, atol=atol) def test_CovAR1(): @@ -277,8 +310,8 @@ def test_CovAR1(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) - + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) def test_CovAR1_scan_onsets(): @@ -293,4 +326,5 @@ def test_CovAR1_scan_onsets(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index 95c27127f..bb714b3c4 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -4,7 +4,7 @@ import tensorflow as tf from brainiak.utils.brsa_gendata import rmn from brainiak.matnormal.matnormal_likelihoods import matnorm_logp -from brainiak.matnormal.covs import CovIdentity,CovUnconstrainedCholesky +from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging logging.basicConfig(level=logging.DEBUG) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index ad373b463..c64da0934 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -3,7 +3,8 @@ from scipy.stats import wishart, multivariate_normal import tensorflow as tf from brainiak.utils.brsa_gendata import rmn -from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_conditional_col, matnorm_logp_conditional_row +from matnormal_likelihoods import (matnorm_logp_conditional_col, + matnorm_logp_conditional_row) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index 57436f4d8..c89e03d2d 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -3,7 +3,8 @@ from scipy.stats import multivariate_normal import tensorflow as tf from brainiak.utils.brsa_gendata import rmn -from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_col, matnorm_logp_marginal_row +from matnormal_likelihoods import (matnorm_logp_marginal_col, + matnorm_logp_marginal_row) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging @@ -43,7 +44,7 @@ def test_against_scipy_mvn_row_marginal(): rowcov_np)) tf_answer 
= matnorm_logp_marginal_row(X_tf, rowcov, colcov, - A_tf, Q) + A_tf, Q) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) @@ -71,5 +72,5 @@ def test_against_scipy_mvn_col_marginal(): colcov_np)) tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, - A_tf, Q) + A_tf, Q) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index fd94ba34d..46f9676aa 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -1,10 +1,11 @@ import numpy as np from scipy.stats import norm, wishart, pearsonr -from brainiak.matnormal.covs import\ - CovIdentity, CovUnconstrainedCholesky, CovUnconstrainedInvCholesky, CovDiagonal +from brainiak.matnormal.covs import (CovIdentity, + CovUnconstrainedCholesky, + CovUnconstrainedInvCholesky, + CovDiagonal) from brainiak.matnormal import MatnormRegression from brainiak.utils.brsa_gendata import rmn -import pytest import logging logging.basicConfig(level=logging.DEBUG) @@ -59,6 +60,7 @@ def test_matnorm_regression_unconstrainedprec(): assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + def test_matnorm_regression_optimizerChoice(): # Y = XB + eps @@ -74,12 +76,14 @@ def test_matnorm_regression_optimizerChoice(): row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedInvCholesky(size=p) - model = MatnormRegression(time_cov=row_cov, space_cov=col_cov, optimizer="CG") + model = MatnormRegression(time_cov=row_cov, space_cov=col_cov, + optimizer="CG") model.fit(X, Y) assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + def test_matnorm_regression_scaledDiag(): # Y = XB + eps @@ -101,5 +105,3 @@ def test_matnorm_regression_scaledDiag(): model.fit(X, Y) assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) - - diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index 881eeb3d1..26c409592 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -16,7 +16,7 @@ def gen_U_nips2016_example(): U = np.eye(n_C) * 0.6 U[8:12, 8:12] = 0.8 for cond in range(8, 12): - U[cond,cond] = 1 + U[cond, cond] = 1 return U From 5ae24f11b02478187a61f73f03f8fa9ec3b99aab Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Sun, 8 Apr 2018 06:49:07 +0100 Subject: [PATCH 08/84] broke this to make the linter happy, fixing --- tests/matnormal/test_matnormal_regression.py | 2 +- tests/matnormal/test_matnormal_rsa.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 46f9676aa..1dffd7c07 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -4,7 +4,7 @@ CovUnconstrainedCholesky, CovUnconstrainedInvCholesky, CovDiagonal) -from brainiak.matnormal import MatnormRegression +from brainiak.matnormal.regression import MatnormRegression from brainiak.utils.brsa_gendata import rmn import logging diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index 26c409592..5afe0e9fa 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -1,4 +1,4 @@ -from brainiak.matnormal import MNRSA +from brainiak.matnormal.mnrsa import MNRSA from brainiak.utils.utils import cov2corr from brainiak.matnormal.covs import CovIdentity, CovDiagonal from scipy.stats 
import norm From 01aed20ff611b46bc10f33d020ef8f54f6931a70 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 9 Apr 2018 18:04:22 +0100 Subject: [PATCH 09/84] more cleanup from hacky copypaste-squash --- brainiak/matnormal/regression.py | 32 +++++++++---------- brainiak/matnormal/utils.py | 8 +++++ tests/matnormal/test_matnormal_logp.py | 2 +- .../test_matnormal_logp_conditional.py | 7 ++-- .../matnormal/test_matnormal_logp_marginal.py | 7 ++-- tests/matnormal/test_matnormal_regression.py | 2 +- tests/matnormal/test_matnormal_rsa.py | 5 +-- 7 files changed, 35 insertions(+), 28 deletions(-) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index c2d19fa27..2c4bf0ec1 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -12,27 +12,27 @@ class MatnormRegression(BaseEstimator): in the presence of both spatial and temporal covariance. ..math:: - Y \\sim \\mathcal{MN}(X\beta, time_noise_cov, space_noise_cov) + Y \\sim \\mathcal{MN}(X\beta, time_cov, space_cov) Parameters ---------- - time_noise_cov : subclass of CovBase + time_cov : subclass of CovBase TR noise covariance class following CovBase interface. - space_noise_cov : subclass of CovBase + space_cov : subclass of CovBase Voxel noise covariance class following CovBase interface. learnRate : real, default=0.01 Step size for the Adam optimizer """ - def __init__(self, time_noise_cov, space_noise_cov, + def __init__(self, time_cov, space_cov, optimizer='L-BFGS-B', optCtrl=None): self.optCtrl, self.optMethod = optCtrl, optimizer - self.time_noise_covtime_noise_cov - self.space_noise_cov = space_noise_cov + self.time_cov = time_cov + self.space_cov = space_cov - self.n_t = time_noise_cov.size - self.n_v = space_noise_cov.size + self.n_t = time_cov.size + self.n_v = space_cov.size self.Y = tf.placeholder(tf.float64, [self.n_t, self.n_v], name="Y") @@ -47,7 +47,7 @@ def logp(self): """ y_hat = tf.matmul(self.X, self.beta) resid = self.Y - y_hat - return matnorm_logp(resid, self.time_noise_cov, self.space_noise_cov) + return matnorm_logp(resid, self.time_cov, self.space_cov) def fit(self, X, y): """ Compute the regression fit. @@ -60,13 +60,13 @@ def fit(self, X, y): fMRI data voxel_pos: np.array, n_voxels by 3, default: None Spatial positions of voxels (optional). - If provided, and if space_noise_cov is a CovGP, the positions + If provided, and if space_cov is a CovGP, the positions for computing the GP covaraince matrix. Otherwise CovGP defaults to distances of 1 unit between all voxels. Ignored by non-GP noise covariances. times : np.array, TRs by 1, default:None Timestamps of observations (optional). - If provided, and if time_noise_cov is a CovGP, the the times + If provided, and if time_cov is a CovGP, the the times for computing the GP covaraince matrix. Otherwise CovGP defaults to distances of 1 unit between all times. Ignored by non-GP noise covariances. 
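The least-squares initialization touched in the next hunk is the generalized least squares estimate, beta = (X' R^{-1} X)^{-1} X' R^{-1} Y, under the current temporal covariance R. A minimal NumPy sketch of that computation (standalone illustrative names, not the class attributes used in the patch; assumes R is positive definite):

import numpy as np

def gls_beta_init(X, Y, R):
    # beta = (X' R^{-1} X)^{-1} X' R^{-1} Y; solve against R directly
    # instead of forming R^{-1} explicitly.
    Rinv_X = np.linalg.solve(R, X)   # R^{-1} X
    Rinv_Y = np.linalg.solve(R, Y)   # R^{-1} Y
    return np.linalg.solve(X.T @ Rinv_X, X.T @ Rinv_Y)

With R equal to the identity this reduces to ordinary least squares, which is why the comment in the hunk notes that only the covariance parameters remain to be optimized after this initialization.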
@@ -87,9 +87,9 @@ def fit(self, X, y): # initialize to the least squares solution (basically all # we need now is the cov) - sigma_inv_x = self.time_noise_cov.Sigma_inv_x(self.X)\ + sigma_inv_x = self.time_cov.Sigma_inv_x(self.X)\ .eval(session=self.sess, feed_dict=feed_dict) - sigma_inv_y = self.time_noise_cov.Sigma_inv_x(self.Y)\ + sigma_inv_y = self.time_cov.Sigma_inv_x(self.Y)\ .eval(session=self.sess, feed_dict=feed_dict) beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), @@ -98,8 +98,8 @@ def fit(self, X, y): self.beta = tf.Variable(beta_init, name="beta") self.train_variables = [self.beta] - self.train_variables.extend(self.time_noise_cov.get_optimize_vars()) - self.train_variables.extend(self.space_noise_cov.get_optimize_vars()) + self.train_variables.extend(self.time_cov.get_optimize_vars()) + self.train_variables.extend(self.space_cov.get_optimize_vars()) self.sess.run(tf.variables_initializer([self.beta])) @@ -142,7 +142,7 @@ def calibrate(self, Y): cannot decode.") # Sigma_s^{-1} B' - Sigma_s_btrp = self.space_noise_cov.Sigma_inv_x(tf.transpose( + Sigma_s_btrp = self.space_cov.Sigma_inv_x(tf.transpose( self.beta)) # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 97b5a1f38..c051fb1f0 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -1,5 +1,13 @@ import functools # https://danijar.com/structuring-your-tensorflow-models/ import tensorflow as tf +from scipy.stats import norm +from numpy.linalg import cholesky + + +def rmn(rowcov, colcov): + # generate random draws from a zero-mean matrix-normal distribution + Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))) + return(cholesky(rowcov).dot(Z).dot(cholesky(colcov))) def doublewrap(function): diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index bb714b3c4..db39166ce 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -2,7 +2,7 @@ from numpy.testing import assert_allclose from scipy.stats import multivariate_normal import tensorflow as tf -from brainiak.utils.brsa_gendata import rmn +from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import matnorm_logp from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index c64da0934..0c4d549bf 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -2,9 +2,10 @@ from numpy.testing import assert_allclose from scipy.stats import wishart, multivariate_normal import tensorflow as tf -from brainiak.utils.brsa_gendata import rmn -from matnormal_likelihoods import (matnorm_logp_conditional_col, - matnorm_logp_conditional_row) +from brainiak.matnormal.utils import rmn +from brainiak.matnormal.matnormal_likelihoods import ( + matnorm_logp_conditional_col, + matnorm_logp_conditional_row) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index c89e03d2d..a9cc43dc2 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -2,9 +2,10 @@ from numpy.testing import assert_allclose from scipy.stats import 
multivariate_normal import tensorflow as tf -from brainiak.utils.brsa_gendata import rmn -from matnormal_likelihoods import (matnorm_logp_marginal_col, - matnorm_logp_marginal_row) +from brainiak.matnormal.utils import rmn +from brainiak.matnormal.matnormal_likelihoods import ( + matnorm_logp_marginal_col, + matnorm_logp_marginal_row) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 1dffd7c07..5ebdceb9e 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -5,7 +5,7 @@ CovUnconstrainedInvCholesky, CovDiagonal) from brainiak.matnormal.regression import MatnormRegression -from brainiak.utils.brsa_gendata import rmn +from brainiak.matnormal.utils import rmn import logging logging.basicConfig(level=logging.DEBUG) diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index 5afe0e9fa..bb7b99fb7 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -3,6 +3,7 @@ from brainiak.matnormal.covs import CovIdentity, CovDiagonal from scipy.stats import norm from numpy.linalg import cholesky +from brainiak.matnormal.utils import rmn import numpy as np import logging @@ -21,10 +22,6 @@ def gen_U_nips2016_example(): return U -def rmn(rowcov, colcov): - # generate random draws from a zero-mean matrix-normal distribution - Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))) - return(cholesky(rowcov).dot(Z).dot(cholesky(colcov))) def gen_brsa_data_matnorm_model(U, n_T, n_V, space_cov, time_cov, n_nureg): From 9ca2cef0221a50f88b3bce6f2d60b4c98f72ac33 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 9 Apr 2018 18:10:17 +0100 Subject: [PATCH 10/84] More linter checks (for some reason run-checks.sh ignores /tests on my machine) --- tests/matnormal/test_matnormal_rsa.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index bb7b99fb7..f69dc4845 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -2,7 +2,6 @@ from brainiak.utils.utils import cov2corr from brainiak.matnormal.covs import CovIdentity, CovDiagonal from scipy.stats import norm -from numpy.linalg import cholesky from brainiak.matnormal.utils import rmn import numpy as np import logging @@ -22,8 +21,6 @@ def gen_U_nips2016_example(): return U - - def gen_brsa_data_matnorm_model(U, n_T, n_V, space_cov, time_cov, n_nureg): n_C = U.shape[0] From 9ee9734336b24fe9902976551aa3842761675742 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Mon, 9 Apr 2018 18:40:48 +0100 Subject: [PATCH 11/84] fixing sphinx complaints --- brainiak/matnormal/__init__.py | 6 +++--- brainiak/matnormal/matnormal_likelihoods.py | 24 ++++++++++----------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index fe7b8a235..41b2ff24b 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -1,5 +1,5 @@ """The matrix variate normal distribution, - with conditional and marginal identities +with conditional and marginal identities ========================================================================================== .. 
math:: @@ -143,7 +143,7 @@ We can do it in the other direction as well, because if :math:`\\X \\sim \\mathcal{MN}(M, U, V)` then :math:`\\X\\trp \\sim - \\mathcal{MN}(M\\trp, V, U)`: +\\mathcal{MN}(M\\trp, V, U)`: .. math:: \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim @@ -249,6 +249,6 @@ These conditional likelihoods are implemented relatively efficiently in `MatnormModelBase.matnorm_logp_conditional_row` and - `MatnormModelBase.matnorm_logp_conditional_col`. +`MatnormModelBase.matnorm_logp_conditional_col`. """ diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index b2f256dd0..a036aa9f5 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -14,14 +14,14 @@ def solve_det_marginal(x, sigma, A, Q): """ Use matrix inversion lemma for the solve: .. math:: - (\Sigma + AQA')^{-1} X =\\ - \Sigma^{-1} - \Sigma^{-1} A (Q^{-1} + - A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} + (\Sigma + AQA')^{-1} X =\\ + \Sigma^{-1} - \Sigma^{-1} A (Q^{-1} + + A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} Use matrix determinant lemma for determinant: - ..math:: - \log|(\Sigma + AQA')| = \log|Q^{-1} + A' \Sigma^{-1} A| - + \log|Q| + \log|\Sigma| + .. math:: + \log|(\Sigma + AQA')| = \log|Q^{-1} + A' \Sigma^{-1} A| + + \log|Q| + \log|\Sigma| """ # we care about condition number of i_qf @@ -64,14 +64,14 @@ def solve_det_conditional(x, sigma, A, Q): """ Use matrix inversion lemma for the solve: .. math:: - (\Sigma - AQ^{-1}A')^{-1} X =\\ - \Sigma^{-1} + \Sigma^{-1} A (Q - - A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} X + (\Sigma - AQ^{-1}A')^{-1} X =\\ + \Sigma^{-1} + \Sigma^{-1} A (Q - + A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} X Use matrix determinant lemma for determinant: - ..math:: - \log|(\Sigma - AQ^{-1}A')| = - \log|Q - A' \Sigma^{-1} A| - \log|Q| + \log|\Sigma| + .. 
math:: + \log|(\Sigma - AQ^{-1}A')| = + \log|Q - A' \Sigma^{-1} A| - \log|Q| + \log|\Sigma| """ # (Q - A' Sigma^{-1} A) From 94ff22c16be4a3d3e428c7cc95822502fce91564 Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Wed, 11 Apr 2018 11:09:53 +0100 Subject: [PATCH 12/84] original-style SRM --- brainiak/matnormal/srm_margs.py | 236 ++++++++++++++++ tests/matnormal/test_matnormal_srm.py | 377 ++++++++++++++++++++++++++ 2 files changed, 613 insertions(+) create mode 100644 brainiak/matnormal/srm_margs.py create mode 100644 tests/matnormal/test_matnormal_srm.py diff --git a/brainiak/matnormal/srm_margs.py b/brainiak/matnormal/srm_margs.py new file mode 100644 index 000000000..e1f5d5098 --- /dev/null +++ b/brainiak/matnormal/srm_margs.py @@ -0,0 +1,236 @@ +import tensorflow as tf +from pymanopt import Problem +from pymanopt.manifolds import Stiefel, Euclidean +from pymanopt.solvers import TrustRegions, ConjugateGradient +from sklearn.base import BaseEstimator +from brainiak.matnormal.covs import CovIdentity +import numpy as np +from brainiak.matnormal.matnormal_likelihoods import ( + matnorm_logp_marginal_col, + matnorm_logp) +from tensorflow.contrib.opt import ScipyOptimizerInterface +import logging + + +logger = logging.getLogger(__name__) + +class MNSRM_OrthoW(BaseEstimator): + """Probabilistic SRM, aka SRM with marginalization over S (and ortho W) + """ + + def __init__(self, n_features=5, time_noise_cov=CovIdentity, + space_noise_cov=CovIdentity, s_cov=CovIdentity, + optMethod="L-BFGS-B",optCtrl={}): + + self.k = n_features + + self.time_noise_cov_class = time_noise_cov + self.space_noise_cov_class = space_noise_cov + self.marg_cov_class = s_cov + + self.optCtrl, self.optMethod = optCtrl, optMethod + + # create a tf session we reuse for this object + self.sess = tf.Session() + + def _eye(self, x): + return tf.diag(tf.ones((x), dtype=tf.float64)) + + def _make_Q_op(self): + mean = self.X - self.b - tf.matmul(self.w, tf.tile(tf.expand_dims(self.s_prime, 0), [self.n, 1, 1]) ) + + det_terms = -(self.v*self.n + self.k)*self.time_cov.logdet -\ + (self.t*self.n)*self.space_cov.logdet -\ + self.t*self.marg_cov.logdet +\ + (self.t*self.v)*tf.reduce_sum(tf.log(self.rhoprec)) + + # used twice below + trace_t_t = tf.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) + + # covs don't support batch ops (yet!) 
(TODO): + x_quad_form = -tf.trace(tf.reduce_sum([tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(mean[j])), + self.space_cov.Sigma_inv_x(mean[j]))*self.rhoprec[j] + for j in range(self.n)], 0)) + + w_quad_form = -tf.trace(tf.reduce_sum([tf.matmul(tf.matmul(self.scov_prime, tf.transpose(self.w[j])), + self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] + for j in range(self.n)], 0)) * trace_t_t + + s_quad_form = -tf.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(self.s_prime)), + self.marg_cov.Sigma_inv_x(self.s_prime))) + + sig_trace_prod = -trace_t_t * tf.trace(self.marg_cov.Sigma_inv_x(self.scov_prime)) + + return 0.5 * (det_terms + x_quad_form + s_quad_form + w_quad_form + sig_trace_prod)#, det_terms, x_quad_form, s_quad_form, w_quad_form, sig_trace_prod + + def make_estep_ops(self): + + tcov_prime = self.time_cov.Sigma + Xmb = self.X - self.b + + sprec_chol = tf.cholesky(self.marg_cov.Sigma_inv + tf.reduce_sum([tf.matmul(tf.transpose(self.w[j]), self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] for j in range(self.n)], 0)) + + wsig_x = tf.reduce_sum([tf.matmul(tf.transpose(self.w[j]), self.space_cov.Sigma_inv_x(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], 0) + + scov_prime = tf.cholesky_solve(sprec_chol, self._eye(self.k)) + + s_prime = tf.cholesky_solve(sprec_chol, wsig_x) + + return s_prime, scov_prime, tcov_prime + + def make_mstep_b_op(self): + + return tf.expand_dims(tf.reduce_sum([self.time_cov.Sigma_inv_x(tf.transpose(self.X[j] - + tf.matmul(self.w[j],self.s_prime))) + for j in range(self.n)], 1) / + tf.reduce_sum(self.time_cov.Sigma_inv), -1) + + def make_mstep_rhoprec_op(self): + + mean = self.X - self.b - tf.matmul(self.w, tf.tile(tf.expand_dims(self.s_prime,0), [self.n, 1, 1]) ) + + mean_trace = tf.stack([tf.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(mean[j])), + self.space_cov.Sigma_inv_x(mean[j]))) for j in range(self.n)]) + + trace_t_t = tf.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) + + w_trace = trace_t_t * tf.stack([tf.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(self.w[j])), + self.space_cov.Sigma_inv_x(self.w[j]))) for j in range(self.n)]) + + rho_hat_unscaled = mean_trace + w_trace + + return (self.v*self.t) / rho_hat_unscaled + + def fit(self, X, n_iter=10, y=None, w_cov=None): + """ + find W marginalizing S + + Parameters + ---------- + X: 2d array + Brain data matrix (voxels by TRs). 
Y in the math + n_iter: int, default=10 + Number of iterations to run + """ + + self.n = len(X) + + self.v, self.t = X[0].shape + + self.X = tf.constant(X, name="X") + + xsvd = [np.linalg.svd(x)for x in X] + + # parameters + self.b = tf.Variable(np.random.normal(size=(self.n, self.v,1)), name="b") + self.rhoprec = tf.Variable(np.ones(self.n), name="rhoprec") + wlist_np = [sv[0][:,:self.k] for sv in xsvd] + self.wlist = [tf.Variable(_w) for _w in wlist_np] + self.w = tf.stack(self.wlist) + self.space_cov = self.space_noise_cov_class(size=self.v) + self.time_cov = self.time_noise_cov_class(size=self.t) + self.marg_cov = self.time_noise_cov_class(size=self.k) + + # sufficient statistics + self.s_prime = tf.Variable(np.average([sv[2][:self.k, :] for sv in xsvd], 0), dtype=tf.float64, name="s_prime") + self.scov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") + self.tcov_prime = tf.Variable(np.eye(self.t), name="wcov_prime") + + # self.Lambda = tf.diag(tf.ones(self.k, dtype=tf.float64)) * 1000 # just there for the q improvement assertion check + + s_prime_op, scov_prime_op, tcov_prime_op = self.make_estep_ops() + + # can update these guys in closed form + b_op = self.make_mstep_b_op() + rhoprec_op = self.make_mstep_rhoprec_op() + + q_op = self._make_Q_op() + + sigma_v_opt = ScipyOptimizerInterface(-q_op, + var_list=self.space_cov.get_optimize_vars(), + method=self.optMethod, + options=self.optCtrl) + + sigma_t_opt = ScipyOptimizerInterface(-q_op, + var_list=self.time_cov.get_optimize_vars(), + method=self.optMethod, + options=self.optCtrl) + + sigma_s_opt = ScipyOptimizerInterface(-q_op, + var_list=self.marg_cov.get_optimize_vars(), + method=self.optMethod, + options=self.optCtrl) + + w_manifold = Stiefel(self.t, self.k) + # s_trp_manifold = Euclidean(self.t, self.k) + solver = ConjugateGradient() + # this would be faster but need to work through some dtype wrangling with + # the internals of pymanopt + # solver = TrustRegions() + + w_problems = [Problem(manifold=w_manifold, cost=-q_op, arg=_w, verbosity=0) for _w in self.wlist] + + # hacky hack hack to let us maintain state on the things we're not pymanopting + for i in range(self.n): + w_problems[i].backend._session = self.sess + + self.sess.run(tf.global_variables_initializer()) + + for em_iter in range(n_iter): + q_start = q_op.eval(session=self.sess) + logger.info("Iter %i, Q at start %f" % (em_iter, q_start)) + + # ESTEP + # compute all the terms with old vals + s_prime_new = s_prime_op.eval(session=self.sess) + tcov_prime_new = tcov_prime_op.eval(session=self.sess) + scov_prime_new = scov_prime_op.eval(session=self.sess) + + # then update (since we reuse wcov_prime in computing w_prime) + self.s_prime.load(s_prime_new, session=self.sess) + self.scov_prime.load(scov_prime_new, session=self.sess) + self.tcov_prime.load(tcov_prime_new, session=self.sess) + + q_end_estep = q_op.eval(session=self.sess) + logger.info("Iter %i, Q at estep end %f" % (em_iter, q_end_estep)) + + # MSTEP + # analytic parts: b and rho! 
that's sort of bad actually + self.b.load(b_op.eval(session=self.sess), session=self.sess) + rhoprec_new = rhoprec_op.eval(session=self.sess) + # rhoprec_norm = tf.norm(rhoprec_new - self.rhoprec).eval(session=self.sess) / self.n + self.rhoprec.load(rhoprec_new, session=self.sess) + + # optimization parts: + for i in range(self.n): + new_w = solver.solve(w_problems[i], x=self.wlist[i].eval(session=self.sess)) + self.wlist[i].load(new_w, session=self.sess) + + if self.space_noise_cov_class is not CovIdentity: + sigma_v_opt.minimize(session=self.sess) + + if self.time_noise_cov_class is not CovIdentity: + sigma_t_opt.minimize(session=self.sess) + + if self.marg_cov_class is not CovIdentity: + sigma_s_opt.minimize(session=self.sess) + + q_end_mstep = q_op.eval(session=self.sess) + logger.info("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) + assert q_end_estep >= q_start + assert q_end_mstep >= q_end_estep + + self.w_ = self.w.eval(session=self.sess) + self.s_ = self.s_prime.eval(session=self.sess) + self.rho_ = 1/self.rhoprec.eval(session=self.sess) + + + def transform(self, X): + vprec = self.space_cov.Sigma_inv.eval(session=self.sess) + return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) + + def transform_orthow(self, X): + # orthonormalize W + w_ortho = [w @ np.linalg.svd(w.T @ w)[0] / np.sqrt(np.linalg.svd(w.T @ w)[1]) for w in self.w_] + vprec = self.space_cov.Sigma_inv.eval(session=self.sess) + return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) \ No newline at end of file diff --git a/tests/matnormal/test_matnormal_srm.py b/tests/matnormal/test_matnormal_srm.py new file mode 100644 index 000000000..309459ce9 --- /dev/null +++ b/tests/matnormal/test_matnormal_srm.py @@ -0,0 +1,377 @@ +import numpy as np +import tensorflow as tf +# from brainiak.matnormal.srm_em_analytic import MatnormSRM_EM_Analytic +from brainiak.matnormal.srm_margs import MNSRM_OrthoW +from brainiak.matnormal.covs import CovUnconstrainedCholesky, CovDiagonal +from numpy.testing import assert_allclose +from brainiak.matnormal.utils import rmn +from scipy.stats import norm, pearsonr, invwishart +import logging + +logging.basicConfig(level=logging.INFO) + +n_T = 10 +n_V = 15 +n_features = 4 +n_subj = 3 + +def rmse(x, xtrue): + return np.sqrt(np.average((x-xtrue)**2)) + + +def estep(W, sigma_v, sigma_t, sigma_s, b, rho, X): + + sigma_t_prime = sigma_t + vinv = np.linalg.inv(sigma_v) + wsw = np.zeros((n_features, n_features)) + wsx = np.zeros((n_features, n_T)) + xmb = X - b + + for j in range(n_subj): + xmb_j = xmb[j*n_V:(j+1)*n_V] + w_j = W[j*n_V:(j+1)*n_V] + wsw = wsw + w_j.T @ np.linalg.solve(sigma_v, w_j) / rho[j] + wsx = wsx + w_j.T @ np.linalg.solve(sigma_v, xmb_j) / rho[j] + + # additional savings here via cholesky probably + sigma_s_prime_inv = np.linalg.inv(sigma_s) + wsw + s_prime = np.linalg.solve(sigma_s_prime_inv, wsx) + sigma_s_prime = np.linalg.solve(sigma_s_prime_inv, np.eye(sigma_s.shape[0])) + return s_prime, sigma_s_prime, sigma_t_prime + + +def ldet(s): + return np.linalg.slogdet(s)[1] + + +def gen_srm_data(n_T, n_V, n_subj, n_features, vcov=None, tcov=None, scov=None, ortho_w=False): + + if scov is None: + sigma_w = sigma_s = np.eye(n_features) + else: + sigma_w = scov + sigma_s = scov + + if vcov is None: + sigma_v = invwishart.rvs(size=1, df=n_V+2,scale = np.eye(n_V)) + else: + sigma_v = vcov + + if tcov is None: + sigma_t = invwishart.rvs(size=1, 
df=n_T+2,scale = np.eye(n_T)) + else: + sigma_t = tcov + + rho = np.exp(np.random.normal(size=n_subj)) + + W = rmn(np.kron(np.diag(rho), sigma_v), sigma_w) + + wlist = W.reshape(n_subj, n_V, n_features) + if ortho_w: + for i in range(n_subj): + u, s, v = np.linalg.svd(wlist[i].T @ wlist[i]) + wnew = wlist[i] @ u @ np.diag(1/np.sqrt(s)) @ v + # wnew = u @ np.diag(1/np.sqrt(s)) @ v @ wlist[i] + assert_allclose(wnew.T @ wnew, np.eye(n_features), rtol=1e-5, atol=1e-5) + wlist[i] = wnew + + W = wlist.reshape(n_subj*n_V, n_features) + S = rmn(sigma_s, sigma_t) + + b = np.random.normal(size=(n_subj * n_V, 1)) + ws = W @ S + b + X = ws + rmn(np.kron(np.diag(rho), sigma_v), sigma_t) + theta = W, S, b, sigma_v, sigma_t, sigma_s, 1/rho + true_sufficient_stats = estep(W, sigma_v, sigma_t, sigma_s, b, rho, X) + return X, theta, ws, true_sufficient_stats + + +def Q(W, sigma_v, sigma_t, sigma_s, b, rho, X, sigma_s_prime, sigma_t_prime, s_prime): + + v = sigma_v.shape[0] + n = rho.shape[0] + t = sigma_t.shape[0] + k = sigma_s.shape[0] + + kroncov = np.kron(np.diag(rho), sigma_v) + + mean = (X - b - W @ s_prime) + + det_terms = -(v*n)*ldet(sigma_t) - t*n*ldet(sigma_v) - t*ldet(sigma_s) - t*v*np.sum(np.log(rho)) - (k)*ldet(sigma_t) + + x_quad_form = -np.trace(np.linalg.solve(sigma_t, mean.T) @ np.linalg.solve(kroncov, mean)) + s_quad_form = -np.trace(np.linalg.solve(sigma_t, s_prime.T) @ np.linalg.solve(sigma_s, s_prime)) + + lik_trace = -np.trace(np.linalg.solve(sigma_t, sigma_t_prime)) * np.trace(sigma_s_prime @ W.T @ np.linalg.solve(kroncov, W)) + prior_trace = -np.trace(np.linalg.solve(sigma_t, sigma_t_prime)) * np.trace(np.linalg.solve(sigma_s, sigma_s_prime)) + + return 0.5 * (det_terms + x_quad_form + s_quad_form + lik_trace + prior_trace)#, det_terms, x_quad_form, s_quad_form, lik_trace, prior_trace + +def mstep_b(W, sigma_v, sigma_t, sigma_s, b, rho, X, sigma_s_prime, sigma_t_prime, s_prime): + + t = sigma_t.shape[0] + b_hat = ((X - W @ s_prime) @ np.linalg.solve(sigma_t, np.ones((t,1)))) / np.sum(np.linalg.inv(sigma_t)) + return b_hat + +def mstep_rho(W, sigma_v, sigma_t, sigma_s, b, rho, X, sigma_s_prime, sigma_t_prime, s_prime): + mean = (X - b - W @ s_prime) + n, v, t = rho.shape[0], sigma_v.shape[0], sigma_t.shape[0] + rho_grad = np.zeros(n) + + rho_hat = np.zeros(n) + for j in range(n): + mean_j = mean[j*v:(j+1)*v] + w_j = W[j*v:(j+1)*v] + rho_hat[j] = (np.trace(np.linalg.solve(sigma_t, mean_j.T) @ np.linalg.solve(sigma_v, mean_j)) + + np.trace(np.linalg.solve(sigma_t, sigma_t_prime)) * np.trace(sigma_s_prime @ w_j.T @ np.linalg.solve(sigma_v, w_j)))/(t*v) + + return rho_hat + +def _load_model_params(model, s_prime, b, rhoprec, Xstack, scov_prime, tcov_prime, w): + model.s_prime.load(s_prime, session=model.sess) + model.b.load(b.reshape(n_subj, n_V, 1), session=model.sess) + model.rhoprec.load(rhoprec, session=model.sess) + model.w.load(w.reshape(n_subj,n_V,n_features), session=model.sess) + model.scov_prime.load(scov_prime, session=model.sess) + model.tcov_prime.load(tcov_prime, session=model.sess) + + +def _init_all(): + + Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features) + s_prime, sigma_s_prime, sigma_t_prime = true_sufficient_stats + + W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta + + q_np = Q(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, sigma_s_prime, sigma_t_prime, s_prime) + + X = Xstack.reshape(n_subj, n_V, n_T) + + model = MNSRM_OrthoW(n_features=n_features) + + model.n = len(X) + + model.v, model.t = X[0].shape + + model.X = 
tf.constant(X, name="X") + + xsvd = [np.linalg.svd(x)for x in X] + + # parameters + model.b = tf.Variable(np.random.normal(size=(model.n, model.v,1)), name="b") + model.rhoprec = tf.Variable(np.ones(model.n), name="rhoprec") + model.w = tf.Variable(np.array([sv[0][:,:model.k] for sv in xsvd]), name="w") + + # sufficient statistics + model.s_prime = tf.Variable(np.average([sv[2][:model.k, :] for sv in xsvd], 0), dtype=tf.float64, name="s_prime") + model.tcov_prime = tf.Variable(np.eye(model.t), name="wcov_prime") + model.scov_prime = tf.Variable(np.eye(model.k), name="vcov_prime") + + model.space_cov = CovFullRankCholesky(size=n_V, Sigma=sigma_v) + model.time_cov = CovFullRankCholesky(size=n_T, Sigma=sigma_t) + model.marg_cov = CovFullRankCholesky(size=n_features, Sigma=sigma_s) + + model.sess.run(tf.global_variables_initializer()) + + _load_model_params(model, s_prime, b, rhoprec, Xstack, sigma_s_prime, sigma_t_prime, W) + + return Xstack, theta, ws, true_sufficient_stats, model + + +def test_Q(): + + # q_op, det_terms_op, x_quad_form_op, s_quad_form_op, lik_trace_op, prior_trace_op = model._make_Q_op() + + Xstack, theta, ws, true_sufficient_stats, model = _init_all() + + q_op = model._make_Q_op() + + q_tf = q_op.eval(session=model.sess) + # det_terms_tf = det_terms_op.eval(session=model.sess) + # x_quad_form_tf = x_quad_form_op.eval(session=model.sess) + # s_quad_form_tf = s_quad_form_op.eval(session=model.sess) + # lik_trace_tf = lik_trace_op.eval(session=model.sess) + # prior_trace_tf = prior_trace_op.eval(session=model.sess) + + # q_np, det_terms_np, x_quad_form_np, s_quad_form_np, lik_trace_np, prior_trace_np = Q(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, sigma_s_prime, sigma_t_prime, s_prime) + + s_prime, sigma_s_prime, sigma_t_prime = true_sufficient_stats + + W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta + + q_np = Q(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, sigma_s_prime, sigma_t_prime, s_prime) + + assert_allclose(q_tf, q_np) + + +def test_estep(): + + Xstack, theta, ws, true_sufficient_stats, model = _init_all() + + s_prime_op, scov_prime_op, tcov_prime_op = model.make_estep_ops() + + s_prime_tf = s_prime_op.eval(session=model.sess) + scov_prime_tf = scov_prime_op.eval(session=model.sess) + tcov_prime_tf = tcov_prime_op.eval(session=model.sess) + + W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta + + s_prime_np, scov_prime_np, tcov_prime_np = estep(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack) + + assert_allclose(s_prime_np, s_prime_tf) + assert_allclose(scov_prime_np, scov_prime_tf) + assert_allclose(tcov_prime_np, tcov_prime_tf) + + +def test_mstep(): + Xstack, theta, ws, true_sufficient_stats, model = _init_all() + W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta + + b_op = model.make_mstep_b_op() + # S_op = model.make_mstep_S_op() + rhoprec_op = model.make_mstep_rhoprec_op() + + s_prime, scov_prime, tcov_prime = estep(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack) + + b_tf = b_op.eval(session=model.sess) + + b_np = mstep_b(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, scov_prime, tcov_prime, s_prime) + + assert_allclose(b_np.reshape(n_subj, n_V, 1), b_tf) + + rhoprec_tf = rhoprec_op.eval(session=model.sess) + + rhoprec_np = 1/mstep_rho(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, scov_prime, tcov_prime, s_prime) + assert_allclose(rhoprec_tf, rhoprec_np) + +def test_dpsrm_identity_covs(): + + sigma_t = np.eye(n_T) + sigma_v = np.eye(n_V) + sigma_w = sigma_s = np.eye(n_features) + + Xstack, theta, ws, true_sufficient_stats 
= gen_srm_data(n_T, n_V, n_subj, n_features, + vcov=sigma_v, tcov=sigma_t, + scov=sigma_s, ortho_w=True) + X = Xstack.reshape(n_subj, n_V, n_T) + + model = MNSRM_OrthoW(n_features=n_features, space_noise_cov=CovIdentity, time_noise_cov=CovIdentity) + + + model.fit(X, n_iter=10) + + reconstructed_WS = model.w_.dot(model.s_) + rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) + assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) + + + +def test_mnsrm_ecm(): + + sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) + sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) + + Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, + vcov=sigma_v, tcov=sigma_t, + scov=None, ortho_w=True) + + X = Xstack.reshape(n_subj, n_V, n_T) + + model = MNSRM_OrthoW(n_features=n_features, space_noise_cov=CovDiagonal, time_noise_cov=CovDiagonal) + + model.fit(X, n_iter=10) + + reconstructed_WS = model.w_.dot(model.s_) + rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) + assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) + + +def test_dpmnsrm_transform(): + sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) + sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) + + Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, + vcov=sigma_v, tcov=sigma_t, + wcov=None, ortho_s=False) + + W = theta[0] + X = Xstack.reshape(n_subj, n_V, n_T) + + model = DPMNSRM(n_features=n_features, space_noise_cov=CovDiagonal, time_noise_cov=CovDiagonal) + + model.fit(X, max_iter=50, convergence_tol=0.001) + + newS = np.random.normal(size=(n_subj, n_features, n_T)) + + newX = np.array([w @ s for (w,s) in zip(W.reshape(n_subj, n_V, n_features), newS)]) + np.random.normal(size=(n_subj, n_V, n_T)) + + new_WS = np.array([w @ s for (w,s) in zip(W.reshape(n_subj, n_V, n_features), newS)]) + + projected_wS = np.array([w @ s for (w,s) in zip(model.w_, model.transform(newX))]) + rmse(new_WS.flatten(), projected_wS.flatten()) + pearsonr(new_WS.flatten(), projected_wS.flatten()) + assert(pearsonr(new_WS.flatten(), projected_wS.flatten())[0] > 0.8) + +def test_dpmnsrm_orthos(): + sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) + sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) + sigma_w = invwishart.rvs(size=1, df=n_features+2,scale = np.eye(n_features)) + + Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, + vcov=sigma_v, tcov=sigma_t, + wcov=sigma_w) + + W = theta[0] + X = Xstack.reshape(n_subj, n_V, n_T) + + model = DPMNSRM(n_features=n_features, space_noise_cov=CovDiagonal, + time_noise_cov=CovDiagonal, + w_cov=CovFullRankCholesky, + s_constraint='ortho') + + model.fit(X, max_iter=50, convergence_tol=0.001) + + reconstructed_WS = model.w_.dot(model.s_) + rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) + assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) + + +def test_dpsrm_identity_covs(): + sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) + sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) + sigma_w = invwishart.rvs(size=1, df=n_features+2,scale = np.eye(n_features)) + + Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, + vcov=sigma_v, tcov=sigma_t, + wcov=sigma_w) + + W = theta[0] + X = Xstack.reshape(n_subj, n_V, n_T) + + model = DPMNSRM(n_features=n_features) + + model.fit(X, max_iter=50, convergence_tol=0.001) + + reconstructed_WS = model.w_.dot(model.s_) + rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) + 
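+    # the rmse value above is informational only; the check that follows is that
+    # the reconstruction W S correlates with the noiseless signal ws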
assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) + +def test_dpsrm_identity_covs_orthos(): + sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) + sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) + sigma_w = invwishart.rvs(size=1, df=n_features+2,scale = np.eye(n_features)) + + Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, + vcov=sigma_v, tcov=sigma_t, + wcov=sigma_w) + + W = theta[0] + X = Xstack.reshape(n_subj, n_V, n_T) + + model = DPMNSRM(n_features=n_features, s_constraint="ortho", w_cov=CovFullRankCholesky) + + model.fit(X, max_iter=50, convergence_tol=0.001) + + reconstructed_WS = model.w_.dot(model.s_) + rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) + assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) \ No newline at end of file From 114721dd50135e7f320a2dda3c267e342f4293ff Mon Sep 17 00:00:00 2001 From: Mike Shvartsman Date: Fri, 20 Apr 2018 17:09:05 -0400 Subject: [PATCH 13/84] WIP dual probabilistic MN-SRM --- brainiak/matnormal/covs.py | 35 ++- brainiak/matnormal/dpmnsrm.py | 419 ++++++++++++++++++++++++++ tests/matnormal/test_cov.py | 24 +- tests/matnormal/test_matnormal_srm.py | 377 ----------------------- 4 files changed, 476 insertions(+), 379 deletions(-) create mode 100644 brainiak/matnormal/dpmnsrm.py delete mode 100644 tests/matnormal/test_matnormal_srm.py diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 223ed70b8..8a5cb5b89 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -19,7 +19,8 @@ 'CovUnconstrainedCholesky', 'CovUnconstrainedCholeskyWishartReg', 'CovUnconstrainedInvCholesky', - 'CovKroneckerFactored'] + 'CovKroneckerFactored', + 'CovScaleMixin'] class CovBase: @@ -540,3 +541,35 @@ def Sigma_inv_x(self, X): z = tf_solve_lower_triangular_masked_kron(self.L, X, self.mask) x = tf_solve_upper_triangular_masked_kron(self.L, z, self.mask) return x + + +class CovScaleMixin: + """ wraps a Cov, adds a scaler (e.g. for subject-specific variances) + """ + def __init__(self, base_cov, scale=1.0): + self._baseCov = base_cov + self._scale = scale + + @define_scope + def logdet(self): + """ log|Sigma| + """ + return self._baseCov.logdet + tf.log(self._scale) * self._baseCov.size + + def Sigma_inv_x(self, X): + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """ + return self._baseCov.Sigma_inv_x(X) / self._scale + + @define_scope + def Sigma(self): + """return Sigma + """ + return self._baseCov.Sigma * self._scale + + @define_scope + def Sigma_inv(self): + """ Sigma^{-1}. 
Override me with more efficient + implementation in subclasses + """ + return self._baseCov.Sigma_inv / self._scale diff --git a/brainiak/matnormal/dpmnsrm.py b/brainiak/matnormal/dpmnsrm.py new file mode 100644 index 000000000..1c7ddd85c --- /dev/null +++ b/brainiak/matnormal/dpmnsrm.py @@ -0,0 +1,419 @@ +import tensorflow as tf +from pymanopt import Problem +from pymanopt.manifolds import Stiefel, Euclidean +from pymanopt.solvers import TrustRegions, ConjugateGradient +from sklearn.base import BaseEstimator +from brainiak.matnormal.covs import (CovIdentity, + CovScaleMixin, + CovTFWrap, + CovUnconstrainedCholesky) +import numpy as np +from brainiak.matnormal.matnormal_likelihoods import ( + matnorm_logp_marginal_col, matnorm_logp) +from tensorflow.contrib.opt import ScipyOptimizerInterface +import logging + + +logger = logging.getLogger(__name__) + + +class DPMNSRM(BaseEstimator): + """Dual probabilistic SRM, aka SRM with marginalization over W + """ + + def __init__(self, n_features=5, time_noise_cov=CovIdentity, + space_noise_cov=CovIdentity, w_cov=CovIdentity, + s_constraint="gaussian", optMethod="L-BFGS-B", optCtrl={}, + improvement_tol=1e-5, algorithm="ECM"): + + self.k = n_features + self.s_constraint = s_constraint + self.improvement_tol = improvement_tol + self.algorithm = algorithm + if s_constraint == "ortho": + logger.info("Orthonormal S selected") + elif s_constraint == "gaussian": + logger.info("Gaussian S selected") + if w_cov is not CovIdentity: + logger.warn("Gaussian S means w_cov can be I w.l.o.g., using\ + more general covs not recommended") + else: + logger.error("Unknown s_constraint! Defaulting to orthonormal.") + self.s_constraint = "ortho" + + self.time_noise_cov_class = time_noise_cov + self.space_noise_cov_class = space_noise_cov + self.marg_cov_class = w_cov + + self.optCtrl, self.optMethod = optCtrl, optMethod + + # create a tf session we reuse for this object + self.sess = tf.Session() + + def _eye(self, x): + return tf.diag(tf.ones((x), dtype=tf.float64)) + + def _make_logp_op(self): + """ MatnormSRM Log-likelihood""" + subj_space_covs = [CovScaleMixin(base_cov=self.space_cov, + scale=1/self.rhoprec[j]) for j in range(self.n)] + if self.marg_cov_class is CovIdentity: + return tf.reduce_sum( + [matnorm_logp_marginal_col(self.X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=self.S, + marg_cov=CovIdentity(size=self.k)) + for j in range(self.n)], name="lik_logp") + + elif self.marg_cov_class is CovUnconstrainedCholesky: + return tf.reduce_sum( + [matnorm_logp_marginal_col(self.X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=tf.matmul(self.marg_cov.L, self.S), + marg_cov=CovIdentity(size=self.k)) + for j in range(self.n)], name="lik_logp") + else: + logger.warn("ECME with cov that is not identity or unconstrained may\ + yield numerical instabilities! Use ECM for now.") + return tf.reduce_sum( + [matnorm_logp_marginal_col(self.X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=self.S, + marg_cov=self.marg_cov) + for j in range(self.n)], name="lik_logp") + + def _make_Q_op(self): + + mean = self.X - self.b - tf.matmul(self.w_prime, + tf.tile(tf.expand_dims(self.S, 0), + [self.n, 1, 1])) + + # covs don't support batch ops (yet!) 
(TODO): + x_quad_form = -tf.trace(tf.reduce_sum( + [tf.matmul(self.time_cov.Sigma_inv_x( + tf.transpose(mean[j])), + self.space_cov.Sigma_inv_x(mean[j])) * + self.rhoprec[j] + for j in range(self.n)], 0)) + + w_quad_form = -tf.trace(tf.reduce_sum( + [tf.matmul(self.marg_cov.Sigma_inv_x( + tf.transpose(self.w_prime[j])), + self.space_cov.Sigma_inv_x(self.w_prime[j])) * + self.rhoprec[j] + for j in range(self.n)], 0)) + + if self.s_constraint == "gaussian": + s_quad_form = -tf.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(self.S)), self.S)) + det_terms = -(self.v*self.n+self.k) * self.time_cov.logdet -\ + (self.k+self.t)*self.n*self.space_cov.logdet +\ + (self.k+self.t)*self.v*tf.reduce_sum(tf.log(self.rhoprec)) -\ + (self.n*self.v)*self.marg_cov.logdet + else: + s_quad_form = 0 + det_terms = -(self.v*self.n)*self.time_cov.logdet -\ + (self.k+self.t)*self.n*self.space_cov.logdet +\ + (self.k+self.t)*self.v*tf.reduce_sum(tf.log(self.rhoprec)) -\ + (self.n*self.v)*self.marg_cov.logdet + + trace_prod = -tf.reduce_sum(self.rhoprec / self.rhoprec_prime) *\ + tf.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ + (tf.trace(tf.matmul(self.wcov_prime, self.marg_cov.Sigma_inv + + tf.matmul(self.S, self.time_cov.Sigma_inv_x( + tf.transpose(self.S)))))) + + return 0.5 * (det_terms + + x_quad_form + + w_quad_form + + trace_prod + + s_quad_form) + + def make_estep_ops(self): + + rhoprec_prime = self.rhoprec + vcov_prime = self.space_cov.Sigma + wchol = tf.cholesky(self.marg_cov.Sigma_inv + + tf.matmul(self.S, self.time_cov.Sigma_inv_x( + tf.transpose(self.S)))) + + wcov_prime = tf.cholesky_solve(wchol, self._eye(self.k)) + + stacked_rhs = tf.tile(tf.expand_dims(self.time_cov.Sigma_inv_x( + tf.transpose(tf.cholesky_solve(wchol, self.S))), 0), + [self.n, 1, 1]) + + w_prime = tf.matmul(self.X-self.b, stacked_rhs) + + return w_prime, rhoprec_prime, vcov_prime, wcov_prime + + def make_mstep_b_op(self): + return tf.expand_dims(tf.reduce_sum( + [self.time_cov.Sigma_inv_x(tf.transpose(self.X[j] - + tf.matmul(self.w_prime[j], self.S))) + for j in range(self.n)], 1) / + tf.reduce_sum(self.time_cov.Sigma_inv), -1) + + def make_mstep_S_op(self): + wtw = tf.reduce_sum( + [tf.matmul(self.w_prime[j], + self.space_cov.Sigma_inv_x(self.w_prime[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], 0) + + wtx = tf.reduce_sum( + [tf.matmul(self.w_prime[j], + self.space_cov.Sigma_inv_x(self.X[j]-self.b[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], 0) + + return tf.matrix_solve(wtw + + tf.reduce_sum(self.rhoprec / + self.rhoprec_prime) * + tf.trace(self.space_cov.Sigma_inv_x( + self.vcov_prime)) * + self.wcov_prime + self._eye(self.k), wtx) + + def make_mstep_rhoprec_op(self): + + mean = self.X - self.b -\ + tf.matmul(self.w_prime, + tf.tile(tf.expand_dims(self.S, 0), + [self.n, 1, 1])) + + mean_trace = tf.stack( + [tf.trace(tf.matmul(self.time_cov.Sigma_inv_x( + tf.transpose(mean[j])), + self.space_cov.Sigma_inv_x(mean[j]))) for j in range(self.n)]) + + w_trace = tf.stack( + [tf.trace(tf.matmul(self.marg_cov.Sigma_inv_x( + tf.transpose(self.w_prime[j])), + self.space_cov.Sigma_inv_x(self.w_prime[j]))) + for j in range(self.n)]) + + shared_term = (1/self.rhoprec_prime) *\ + tf.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ + tf.trace(tf.matmul(self.wcov_prime, + self.marg_cov.Sigma_inv + + tf.matmul(self.S, + self.time_cov.Sigma_inv_x( + tf.transpose(self.S))))) + rho_hat_unscaled = mean_trace + w_trace + shared_term + + return (self.v*(self.k+self.t)) / 
rho_hat_unscaled + + def _init_vars(self, X): + self.n = len(X) + + self.v, self.t = X[0].shape + + self.X = tf.constant(X, name="X") + + xsvd = [np.linalg.svd(x)for x in X] + + # parameters + self.b = tf.Variable(np.random.normal(size=(self.n, self.v, 1)), + name="b") + self.rhoprec = tf.Variable(np.ones(self.n), name="rhoprec") + + self.w_prime = tf.Variable(np.array([s[0][:, :self.k] for s in xsvd]), + name="w_prime") + self.rhoprec_prime = tf.Variable(np.ones(self.n), name="rhoprec_prime") + self.wcov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") + self.vcov_prime = tf.Variable(np.eye(self.v), name="vcov_prime") + + self.space_cov = self.space_noise_cov_class(size=self.v) + self.time_cov = self.time_noise_cov_class(size=self.t) + self.marg_cov = self.marg_cov_class(size=self.k) + + # we need Strp to be the actual param because stiefel is on the rows, + # and might as well initialize with SVD + + self.S_trp = tf.Variable(np.average([s[2][:self.k, :] for s in xsvd], + 0).T, + dtype=tf.float64, name="S_transpose") + self.S = tf.transpose(self.S_trp) + + def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): + """ + find S marginalizing W + + Parameters + ---------- + X: 2d array + Brain data matrix (voxels by TRs). Y in the math + n_iter: int, default=10 + Max iterations to run + """ + + # in case we get a list, and/or int16s or float32s + X = np.array(X).astype(np.float64) + self._init_vars(X) + + (w_prime_op, + rhoprec_prime_op, + vcov_prime_op, + wcov_prime_op) = self.make_estep_ops() + + b_op = self.make_mstep_b_op() + rhoprec_op = self.make_mstep_rhoprec_op() + + s_op = self.make_mstep_S_op() + + if self.algorithm == "ECME": + loss_op = -self._make_logp_op() + loss_name = "-Marginal Lik" + elif self.algorithm == "ECM": + loss_op = -self._make_Q_op() + loss_name = "-ELPD (Q)" + else: + logger.error("Unknown algorithm %s!" 
% self.algorithm) + + sigma_v_opt = ScipyOptimizerInterface(loss_op, + var_list=self.space_cov.get_optimize_vars(), + method=self.optMethod, + options=self.optCtrl) + + sigma_t_opt = ScipyOptimizerInterface(loss_op, + var_list=self.time_cov.get_optimize_vars(), + method=self.optMethod, + options=self.optCtrl) + + sigma_w_opt = ScipyOptimizerInterface(loss_op, + var_list=self.marg_cov.get_optimize_vars(), + method=self.optMethod, + options=self.optCtrl) + + s_trp_manifold = Stiefel(self.t, self.k) + solver = ConjugateGradient() + + problem = Problem(manifold=s_trp_manifold, cost=loss_op, + arg=self.S_trp, verbosity=1) + + # hacky hack hack to let us maintain state on the things + # we're not pymanopting + problem.backend._session = self.sess + + self.sess.run(tf.global_variables_initializer()) + + converged = False + for i in range(max_iter): + loss_start = loss_op.eval(session=self.sess) + logger.info("Iter %i, %s at start %f" % (i, loss_name, loss_start)) + + # ESTEP + # compute all the terms with old vals + w_prime_new = w_prime_op.eval(session=self.sess) + rhoprec_prime_new = rhoprec_prime_op.eval(session=self.sess) + wcov_prime_new = wcov_prime_op.eval(session=self.sess) + vcov_prime_new = vcov_prime_op.eval(session=self.sess) + + # for convergence, we check w, rho, and sigma_v (since we + # use them for reconstruction/projection) + w_norm = tf.norm(w_prime_new - self.w_prime).eval( + session=self.sess) / (self.n*self.v*self.k) + # update (since we reuse wcov_prime in computing w_prime) + self.w_prime.load(w_prime_new, session=self.sess) + self.rhoprec_prime.load(rhoprec_prime_new, session=self.sess) + self.wcov_prime.load(wcov_prime_new, session=self.sess) + self.vcov_prime.load(vcov_prime_new, session=self.sess) + + loss_end_estep = loss_op.eval(session=self.sess) + logger.info("Iter %i, %s at estep end %f" % + (i, loss_name, loss_end_estep)) + + # MSTEP + self.b.load(b_op.eval(session=self.sess), session=self.sess) + + rhoprec_new = rhoprec_op.eval(session=self.sess) + rhoprec_norm = tf.norm(rhoprec_new - self.rhoprec).eval( + session=self.sess) / self.n + self.rhoprec.load(rhoprec_new, session=self.sess) + + if self.s_constraint == "gaussian": + s_hat = s_op.eval(session=self.sess).T + elif self.s_constraint == "ortho": + if i == 0: + # initial guess it the least squares op + s_hat = solver.solve(problem, x=s_op.eval( + session=self.sess).T) + else: + s_hat = solver.solve(problem, x=self.S_trp.eval( + session=self.sess)) + + self.S_trp.load(s_hat, session=self.sess) + + old_sigma_v = self.space_cov.Sigma.eval(session=self.sess) + + if self.space_noise_cov_class is not CovIdentity: + sigma_v_opt.minimize(session=self.sess) + + sigv_norm = tf.norm(old_sigma_v - self.space_cov.Sigma).eval( + session=self.sess) / (self.v**2) + + if self.time_noise_cov_class is not CovIdentity: + sigma_t_opt.minimize(session=self.sess) + + if self.marg_cov_class is not CovIdentity: + sigma_w_opt.minimize(session=self.sess) + + loss_end_mstep = loss_op.eval(session=self.sess) + logger.info("Iter %i, %s at mstep end %f" % + (i, loss_name, loss_end_mstep)) + if loss_end_estep > loss_start: + logger.warn("Warning! estep did not improve loss!\ + Instead, worsened by %f" % + (loss_start-loss_end_estep)) + if loss_end_estep > loss_start: + logger.warn("Warning! 
mstep did not improve loss!\ + Instead, worsened by %f" % + (loss_end_estep-loss_end_mstep)) + + logger.info("Iter %i end, W norm %f, sigV norm %f,\ + rhoprec norm %f" % + (i, w_norm, sigv_norm, rhoprec_norm)) + + delQ = loss_end_mstep - loss_start + if np.max(np.r_[w_norm, sigv_norm, + rhoprec_norm, delQ]) <= convergence_tol: + converged = True + break + + if converged: + logger.info("Converged in %i iterations" % i) + else: + logger.warn("Not converged to tolerance!\ + Results may not be reliable") + self.w_ = self.w_prime.eval(session=self.sess) + self.s_ = self.S.eval(session=self.sess) + self.rho_ = 1/self.rhoprec.eval(session=self.sess) + + self.final_loss_ = loss_op.eval(session=self.sess) + self.logp_ = self._make_logp_op().eval(session=self.sess) + + def _condition(self, x): + s = np.linalg.svd(x, compute_uv=False) + return np.max(s)/np.min(s) + + def transform(self, X): + vprec = self.space_cov.Sigma_inv.eval(session=self.sess) + conditions = [self._condition((w.T @ vprec @ w)/r) + for (w, r) in zip(self.w_, self.rho_)] + logger.info(["Condition #s for transformation"] + conditions) + return [np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) + for w, x, r in zip(self.w_, X, self.rho_)] + + def transform_orthow(self, X): + # orthonormalize W + w_ortho = [w @ np.linalg.svd(w.T @ w)[0] / + np.sqrt(np.linalg.svd(w.T @ w)[1]) + for w in self.w_] + vprec = self.space_cov.Sigma_inv.eval(session=self.sess) + conditions = [self._condition((w.T @ vprec @ w)/r) + for (w, r) in zip(self.w_, self.rho_)] + logger.info(["Condition #s for transformation"] + conditions) + return [np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) + for w, x, r in zip(self.w_, X, self.rho_)] diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 342caed1a..352057502 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -9,7 +9,8 @@ CovUnconstrainedCholesky, CovUnconstrainedCholeskyWishartReg, CovUnconstrainedInvCholesky, - CovKroneckerFactored) + CovKroneckerFactored, + CovScaleMixin) import tensorflow as tf import pytest import logging @@ -328,3 +329,24 @@ def test_CovAR1_scan_onsets(): assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol) + +def test_CovScaleMixin(): + + base_cov = CovUnconstrainedCholesky(size=m) + sc_np = np.abs(np.random.normal(size=5)) + scales = tf.constant(sc_np)*5 + covs = [CovScaleMixin(base_cov, scales[j]) for j in range(5)] + + + with tf.Session() as sess: + # initialize the random covariance + sess.run(tf.variables_initializer(base_cov.get_optimize_vars())) + # verify that it is truly that cov scaled + for j in range(5): + + # compute the naive version + cov_np = base_cov.Sigma.eval(session=sess) * scales[j].eval(session=sess) + + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, covs[j].logdet.eval(session=sess), rtol=rtol, atol=atol) + assert_allclose(sinvx_np, covs[j].Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol, atol=atol) diff --git a/tests/matnormal/test_matnormal_srm.py b/tests/matnormal/test_matnormal_srm.py deleted file mode 100644 index 309459ce9..000000000 --- a/tests/matnormal/test_matnormal_srm.py +++ /dev/null @@ -1,377 +0,0 @@ -import numpy as np -import tensorflow as tf -# from brainiak.matnormal.srm_em_analytic import MatnormSRM_EM_Analytic -from brainiak.matnormal.srm_margs import MNSRM_OrthoW -from brainiak.matnormal.covs import CovUnconstrainedCholesky, CovDiagonal -from 
numpy.testing import assert_allclose -from brainiak.matnormal.utils import rmn -from scipy.stats import norm, pearsonr, invwishart -import logging - -logging.basicConfig(level=logging.INFO) - -n_T = 10 -n_V = 15 -n_features = 4 -n_subj = 3 - -def rmse(x, xtrue): - return np.sqrt(np.average((x-xtrue)**2)) - - -def estep(W, sigma_v, sigma_t, sigma_s, b, rho, X): - - sigma_t_prime = sigma_t - vinv = np.linalg.inv(sigma_v) - wsw = np.zeros((n_features, n_features)) - wsx = np.zeros((n_features, n_T)) - xmb = X - b - - for j in range(n_subj): - xmb_j = xmb[j*n_V:(j+1)*n_V] - w_j = W[j*n_V:(j+1)*n_V] - wsw = wsw + w_j.T @ np.linalg.solve(sigma_v, w_j) / rho[j] - wsx = wsx + w_j.T @ np.linalg.solve(sigma_v, xmb_j) / rho[j] - - # additional savings here via cholesky probably - sigma_s_prime_inv = np.linalg.inv(sigma_s) + wsw - s_prime = np.linalg.solve(sigma_s_prime_inv, wsx) - sigma_s_prime = np.linalg.solve(sigma_s_prime_inv, np.eye(sigma_s.shape[0])) - return s_prime, sigma_s_prime, sigma_t_prime - - -def ldet(s): - return np.linalg.slogdet(s)[1] - - -def gen_srm_data(n_T, n_V, n_subj, n_features, vcov=None, tcov=None, scov=None, ortho_w=False): - - if scov is None: - sigma_w = sigma_s = np.eye(n_features) - else: - sigma_w = scov - sigma_s = scov - - if vcov is None: - sigma_v = invwishart.rvs(size=1, df=n_V+2,scale = np.eye(n_V)) - else: - sigma_v = vcov - - if tcov is None: - sigma_t = invwishart.rvs(size=1, df=n_T+2,scale = np.eye(n_T)) - else: - sigma_t = tcov - - rho = np.exp(np.random.normal(size=n_subj)) - - W = rmn(np.kron(np.diag(rho), sigma_v), sigma_w) - - wlist = W.reshape(n_subj, n_V, n_features) - if ortho_w: - for i in range(n_subj): - u, s, v = np.linalg.svd(wlist[i].T @ wlist[i]) - wnew = wlist[i] @ u @ np.diag(1/np.sqrt(s)) @ v - # wnew = u @ np.diag(1/np.sqrt(s)) @ v @ wlist[i] - assert_allclose(wnew.T @ wnew, np.eye(n_features), rtol=1e-5, atol=1e-5) - wlist[i] = wnew - - W = wlist.reshape(n_subj*n_V, n_features) - S = rmn(sigma_s, sigma_t) - - b = np.random.normal(size=(n_subj * n_V, 1)) - ws = W @ S + b - X = ws + rmn(np.kron(np.diag(rho), sigma_v), sigma_t) - theta = W, S, b, sigma_v, sigma_t, sigma_s, 1/rho - true_sufficient_stats = estep(W, sigma_v, sigma_t, sigma_s, b, rho, X) - return X, theta, ws, true_sufficient_stats - - -def Q(W, sigma_v, sigma_t, sigma_s, b, rho, X, sigma_s_prime, sigma_t_prime, s_prime): - - v = sigma_v.shape[0] - n = rho.shape[0] - t = sigma_t.shape[0] - k = sigma_s.shape[0] - - kroncov = np.kron(np.diag(rho), sigma_v) - - mean = (X - b - W @ s_prime) - - det_terms = -(v*n)*ldet(sigma_t) - t*n*ldet(sigma_v) - t*ldet(sigma_s) - t*v*np.sum(np.log(rho)) - (k)*ldet(sigma_t) - - x_quad_form = -np.trace(np.linalg.solve(sigma_t, mean.T) @ np.linalg.solve(kroncov, mean)) - s_quad_form = -np.trace(np.linalg.solve(sigma_t, s_prime.T) @ np.linalg.solve(sigma_s, s_prime)) - - lik_trace = -np.trace(np.linalg.solve(sigma_t, sigma_t_prime)) * np.trace(sigma_s_prime @ W.T @ np.linalg.solve(kroncov, W)) - prior_trace = -np.trace(np.linalg.solve(sigma_t, sigma_t_prime)) * np.trace(np.linalg.solve(sigma_s, sigma_s_prime)) - - return 0.5 * (det_terms + x_quad_form + s_quad_form + lik_trace + prior_trace)#, det_terms, x_quad_form, s_quad_form, lik_trace, prior_trace - -def mstep_b(W, sigma_v, sigma_t, sigma_s, b, rho, X, sigma_s_prime, sigma_t_prime, s_prime): - - t = sigma_t.shape[0] - b_hat = ((X - W @ s_prime) @ np.linalg.solve(sigma_t, np.ones((t,1)))) / np.sum(np.linalg.inv(sigma_t)) - return b_hat - -def mstep_rho(W, sigma_v, sigma_t, sigma_s, b, rho, X, 
sigma_s_prime, sigma_t_prime, s_prime): - mean = (X - b - W @ s_prime) - n, v, t = rho.shape[0], sigma_v.shape[0], sigma_t.shape[0] - rho_grad = np.zeros(n) - - rho_hat = np.zeros(n) - for j in range(n): - mean_j = mean[j*v:(j+1)*v] - w_j = W[j*v:(j+1)*v] - rho_hat[j] = (np.trace(np.linalg.solve(sigma_t, mean_j.T) @ np.linalg.solve(sigma_v, mean_j)) + - np.trace(np.linalg.solve(sigma_t, sigma_t_prime)) * np.trace(sigma_s_prime @ w_j.T @ np.linalg.solve(sigma_v, w_j)))/(t*v) - - return rho_hat - -def _load_model_params(model, s_prime, b, rhoprec, Xstack, scov_prime, tcov_prime, w): - model.s_prime.load(s_prime, session=model.sess) - model.b.load(b.reshape(n_subj, n_V, 1), session=model.sess) - model.rhoprec.load(rhoprec, session=model.sess) - model.w.load(w.reshape(n_subj,n_V,n_features), session=model.sess) - model.scov_prime.load(scov_prime, session=model.sess) - model.tcov_prime.load(tcov_prime, session=model.sess) - - -def _init_all(): - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features) - s_prime, sigma_s_prime, sigma_t_prime = true_sufficient_stats - - W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta - - q_np = Q(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, sigma_s_prime, sigma_t_prime, s_prime) - - X = Xstack.reshape(n_subj, n_V, n_T) - - model = MNSRM_OrthoW(n_features=n_features) - - model.n = len(X) - - model.v, model.t = X[0].shape - - model.X = tf.constant(X, name="X") - - xsvd = [np.linalg.svd(x)for x in X] - - # parameters - model.b = tf.Variable(np.random.normal(size=(model.n, model.v,1)), name="b") - model.rhoprec = tf.Variable(np.ones(model.n), name="rhoprec") - model.w = tf.Variable(np.array([sv[0][:,:model.k] for sv in xsvd]), name="w") - - # sufficient statistics - model.s_prime = tf.Variable(np.average([sv[2][:model.k, :] for sv in xsvd], 0), dtype=tf.float64, name="s_prime") - model.tcov_prime = tf.Variable(np.eye(model.t), name="wcov_prime") - model.scov_prime = tf.Variable(np.eye(model.k), name="vcov_prime") - - model.space_cov = CovFullRankCholesky(size=n_V, Sigma=sigma_v) - model.time_cov = CovFullRankCholesky(size=n_T, Sigma=sigma_t) - model.marg_cov = CovFullRankCholesky(size=n_features, Sigma=sigma_s) - - model.sess.run(tf.global_variables_initializer()) - - _load_model_params(model, s_prime, b, rhoprec, Xstack, sigma_s_prime, sigma_t_prime, W) - - return Xstack, theta, ws, true_sufficient_stats, model - - -def test_Q(): - - # q_op, det_terms_op, x_quad_form_op, s_quad_form_op, lik_trace_op, prior_trace_op = model._make_Q_op() - - Xstack, theta, ws, true_sufficient_stats, model = _init_all() - - q_op = model._make_Q_op() - - q_tf = q_op.eval(session=model.sess) - # det_terms_tf = det_terms_op.eval(session=model.sess) - # x_quad_form_tf = x_quad_form_op.eval(session=model.sess) - # s_quad_form_tf = s_quad_form_op.eval(session=model.sess) - # lik_trace_tf = lik_trace_op.eval(session=model.sess) - # prior_trace_tf = prior_trace_op.eval(session=model.sess) - - # q_np, det_terms_np, x_quad_form_np, s_quad_form_np, lik_trace_np, prior_trace_np = Q(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, sigma_s_prime, sigma_t_prime, s_prime) - - s_prime, sigma_s_prime, sigma_t_prime = true_sufficient_stats - - W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta - - q_np = Q(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, sigma_s_prime, sigma_t_prime, s_prime) - - assert_allclose(q_tf, q_np) - - -def test_estep(): - - Xstack, theta, ws, true_sufficient_stats, model = _init_all() - - s_prime_op, scov_prime_op, tcov_prime_op 
= model.make_estep_ops() - - s_prime_tf = s_prime_op.eval(session=model.sess) - scov_prime_tf = scov_prime_op.eval(session=model.sess) - tcov_prime_tf = tcov_prime_op.eval(session=model.sess) - - W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta - - s_prime_np, scov_prime_np, tcov_prime_np = estep(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack) - - assert_allclose(s_prime_np, s_prime_tf) - assert_allclose(scov_prime_np, scov_prime_tf) - assert_allclose(tcov_prime_np, tcov_prime_tf) - - -def test_mstep(): - Xstack, theta, ws, true_sufficient_stats, model = _init_all() - W, S, b, sigma_v, sigma_t, sigma_s, rhoprec = theta - - b_op = model.make_mstep_b_op() - # S_op = model.make_mstep_S_op() - rhoprec_op = model.make_mstep_rhoprec_op() - - s_prime, scov_prime, tcov_prime = estep(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack) - - b_tf = b_op.eval(session=model.sess) - - b_np = mstep_b(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, scov_prime, tcov_prime, s_prime) - - assert_allclose(b_np.reshape(n_subj, n_V, 1), b_tf) - - rhoprec_tf = rhoprec_op.eval(session=model.sess) - - rhoprec_np = 1/mstep_rho(W, sigma_v, sigma_t, sigma_s, b, 1/rhoprec, Xstack, scov_prime, tcov_prime, s_prime) - assert_allclose(rhoprec_tf, rhoprec_np) - -def test_dpsrm_identity_covs(): - - sigma_t = np.eye(n_T) - sigma_v = np.eye(n_V) - sigma_w = sigma_s = np.eye(n_features) - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, - vcov=sigma_v, tcov=sigma_t, - scov=sigma_s, ortho_w=True) - X = Xstack.reshape(n_subj, n_V, n_T) - - model = MNSRM_OrthoW(n_features=n_features, space_noise_cov=CovIdentity, time_noise_cov=CovIdentity) - - - model.fit(X, n_iter=10) - - reconstructed_WS = model.w_.dot(model.s_) - rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) - assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) - - - -def test_mnsrm_ecm(): - - sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) - sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, - vcov=sigma_v, tcov=sigma_t, - scov=None, ortho_w=True) - - X = Xstack.reshape(n_subj, n_V, n_T) - - model = MNSRM_OrthoW(n_features=n_features, space_noise_cov=CovDiagonal, time_noise_cov=CovDiagonal) - - model.fit(X, n_iter=10) - - reconstructed_WS = model.w_.dot(model.s_) - rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) - assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) - - -def test_dpmnsrm_transform(): - sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) - sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, - vcov=sigma_v, tcov=sigma_t, - wcov=None, ortho_s=False) - - W = theta[0] - X = Xstack.reshape(n_subj, n_V, n_T) - - model = DPMNSRM(n_features=n_features, space_noise_cov=CovDiagonal, time_noise_cov=CovDiagonal) - - model.fit(X, max_iter=50, convergence_tol=0.001) - - newS = np.random.normal(size=(n_subj, n_features, n_T)) - - newX = np.array([w @ s for (w,s) in zip(W.reshape(n_subj, n_V, n_features), newS)]) + np.random.normal(size=(n_subj, n_V, n_T)) - - new_WS = np.array([w @ s for (w,s) in zip(W.reshape(n_subj, n_V, n_features), newS)]) - - projected_wS = np.array([w @ s for (w,s) in zip(model.w_, model.transform(newX))]) - rmse(new_WS.flatten(), projected_wS.flatten()) - pearsonr(new_WS.flatten(), projected_wS.flatten()) - assert(pearsonr(new_WS.flatten(), 
projected_wS.flatten())[0] > 0.8) - -def test_dpmnsrm_orthos(): - sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) - sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) - sigma_w = invwishart.rvs(size=1, df=n_features+2,scale = np.eye(n_features)) - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, - vcov=sigma_v, tcov=sigma_t, - wcov=sigma_w) - - W = theta[0] - X = Xstack.reshape(n_subj, n_V, n_T) - - model = DPMNSRM(n_features=n_features, space_noise_cov=CovDiagonal, - time_noise_cov=CovDiagonal, - w_cov=CovFullRankCholesky, - s_constraint='ortho') - - model.fit(X, max_iter=50, convergence_tol=0.001) - - reconstructed_WS = model.w_.dot(model.s_) - rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) - assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) - - -def test_dpsrm_identity_covs(): - sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) - sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) - sigma_w = invwishart.rvs(size=1, df=n_features+2,scale = np.eye(n_features)) - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, - vcov=sigma_v, tcov=sigma_t, - wcov=sigma_w) - - W = theta[0] - X = Xstack.reshape(n_subj, n_V, n_T) - - model = DPMNSRM(n_features=n_features) - - model.fit(X, max_iter=50, convergence_tol=0.001) - - reconstructed_WS = model.w_.dot(model.s_) - rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) - assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) - -def test_dpsrm_identity_covs_orthos(): - sigma_t = np.diag(np.abs(norm.rvs(size=n_T))) - sigma_v = np.diag(np.abs(norm.rvs(size=n_V))) - sigma_w = invwishart.rvs(size=1, df=n_features+2,scale = np.eye(n_features)) - - Xstack, theta, ws, true_sufficient_stats = gen_srm_data(n_T, n_V, n_subj, n_features, - vcov=sigma_v, tcov=sigma_t, - wcov=sigma_w) - - W = theta[0] - X = Xstack.reshape(n_subj, n_V, n_T) - - model = DPMNSRM(n_features=n_features, s_constraint="ortho", w_cov=CovFullRankCholesky) - - model.fit(X, max_iter=50, convergence_tol=0.001) - - reconstructed_WS = model.w_.dot(model.s_) - rmse(ws.reshape(n_subj, n_V, n_T), reconstructed_WS) - assert(pearsonr(ws.reshape(n_subj, n_V, n_T).flatten(), reconstructed_WS.flatten())[0] > 0.8) \ No newline at end of file From 56cc6c8ccd05a789689843baae67db3bbb588542 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 17 Jul 2019 20:56:44 -0700 Subject: [PATCH 14/84] move kronecker solvers to their own file, utils.py was getting unwieldy --- brainiak/utils/kronecker_solvers.py | 309 ++++++++++++++++++++++++++++ 1 file changed, 309 insertions(+) create mode 100644 brainiak/utils/kronecker_solvers.py diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py new file mode 100644 index 000000000..fc67b1764 --- /dev/null +++ b/brainiak/utils/kronecker_solvers.py @@ -0,0 +1,309 @@ +import tensorflow as tf + +__all__ = [ + "tf_kron_mult", + "tf_masked_triangular_solve", +] + +def tf_solve_lower_triangular_kron(L, y): + """ Tensor flow function to solve L x = y + where L = kron(L[0], L[1] .. 
L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + """ + n = len(L) + if n == 1: + return tf.matrix_triangular_solve(L[0], y) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + + for i in range(na): + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + xinb = tf_solve_lower_triangular_kron(L[1:], t) + xina = xina - tf.reshape( + tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), [1, nb*col]), + [(na-i-1)*nb, col]) * \ + tf.reshape( + tf.tile(tf.reshape(t, [-1, 1]), [na-i-1, 1]), + [(na-i-1)*nb, col]) + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + + +def tf_solve_upper_triangular_kron(L, y): + """ Tensor flow function to solve L^T x = y + where L = kron(L[0], L[1] .. L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + """ + n = len(L) + if n == 1: + return tf.matrix_triangular_solve(L[0], y, adjoint=True) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + + for i in range(na-1, -1, -1): + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + xinb = tf_solve_upper_triangular_kron(L[1:], t) + xt = (xt + - tf.reshape( + tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), + [1, nb*col]), + [i*nb, col]) + * tf.reshape( + tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), [i*nb, col])) + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + + +def tf_kron_mult(L, x): + """ Tensorflow multiply with kronecker product matrix + Returs kron(L[0], L[1] ...) 
* x + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a square matrix of dimension n_i x n_i + + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + + Returns + ------- + y : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p + """ + n = len(L) + if n == 1: + return tf.matmul(L[0], x) + else: + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + xt = tf_kron_mult( + L[1:], + tf.transpose(tf.reshape(tf.transpose(x), [-1, nb]))) + y = tf.zeros_like(x) + for i in range(na): + ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) + yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), + tf.transpose(tf.slice(L[0], [i, 0], [1, na]))), + [nb, col]) + y = tf.concat(axis=0, values=[ya, yb, yc]) + return y + + +def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): + """ Tensor flow function to solve L x = y + where L is a lower triangular matrix with a mask + + Arguments + --------- + L : 2-D tensor + Must be a tensorflow tensor and + must be a triangular matrix of dimension n x n + + y : 1-D or 2-D tensor + Dimension n x p + + mask : 1-D tensor + Dimension n x 1, should be 1 if element is valid, 0 if invalid + + lower : boolean (default : True) + True if L is lower triangular, False if upper triangular + + adjoint : boolean (default : False) + True if solving for L^x = y, False if solving for Lx = y + + Returns + ------- + x : 1-D or 2-D tensor + Dimension n x p, values at rows for which mask == 0 are set to zero + + """ + + zero = tf.constant(0, dtype=tf.int32) + mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), + tf.reshape(mask, [1, -1])), zero)) + q = tf.to_int32(tf.sqrt(tf.to_double(tf.shape(mask_mat)[0]))) + L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) + + maskindex = tf.where(tf.not_equal(mask, zero)) + y_masked = tf.gather_nd(y, maskindex) + + x_s1 = tf.matrix_triangular_solve(L_masked, y_masked, + lower=lower, adjoint=adjoint) + x = tf.scatter_nd(maskindex, x_s1, tf.to_int64(tf.shape(y))) + return x + + +def tf_solve_lower_triangular_masked_kron(L, y, mask): + """ Tensor flow function to solve L x = y + where L = kron(L[0], L[1] .. 
L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension [n_0*n_1*..n_(m-1)), p] + + mask: 1-D tensor + Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 + for don't care + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p, values at rows + for which mask == 0 are set to zero + + """ + n = len(L) + if n == 1: + return tf_masked_triangular_solve(L[0], y, mask, + lower=True, adjoint=False) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + + for i in range(na): + mask_b = tf.slice(mask, [i*nb], [nb]) + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + + if tf.reduce_sum(mask_b) != nb: + xinb = tf_solve_lower_triangular_masked_kron(L[1:], t, mask_b) + t_masked = tf_kron_mult(L[1:], xinb) + + else: + # all valid - same as no mask + xinb = tf_solve_lower_triangular_kron(L[1:], t) + t_masked = t + xina = (xina + - tf.reshape( + tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), + [1, nb*col]), + [(na-i-1)*nb, col]) + * tf.reshape( + tf.tile(tf.reshape(t_masked, [-1, 1]), [na-i-1, 1]), + [(na-i-1)*nb, col])) + + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x + + +def tf_solve_upper_triangular_masked_kron(L, y, mask): + """ Tensor flow function to solve L^T x = y + where L = kron(L[0], L[1] .. L[n-1]) + and L[i] are the lower triangular matrices + + Arguments + --------- + L : list of 2-D tensors + Each element of the list must be a tensorflow tensor and + must be a lower triangular matrix of dimension n_i x n_i + + y : 1-D or 2-D tensor + Dimension [n_0*n_1*..n_(m-1)), p] + + mask: 1-D tensor + Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows + and 0 for don't care + + Returns + ------- + x : 1-D or 2-D tensor + Dimension (n_0*n_1*..n_(m-1)) x p, values at rows + for which mask == 0 are set to zero + + """ + n = len(L) + if n == 1: + return tf_masked_triangular_solve(L[0], y, mask, + lower=True, adjoint=True) + else: + x = y + na = L[0].get_shape().as_list()[0] + n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) + n_prod = tf.to_int32(tf.reduce_prod(n_list)) + nb = tf.to_int32(n_prod/na) + col = tf.shape(x)[1] + L1_end_tr = [tf.transpose(x) for x in L[1:]] + + for i in range(na-1, -1, -1): + mask_b = tf.slice(mask, [i*nb], [nb]) + xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + t = xinb / L[0][i, i] + + if tf.reduce_sum(mask_b) != nb: + xinb = tf_solve_upper_triangular_masked_kron(L[1:], t, mask_b) + t_masked = tf_kron_mult(L1_end_tr, xinb) + else: + xinb = tf_solve_upper_triangular_kron(L[1:], t) + t_masked = t + + xt = (xt + - tf.reshape( + tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), + [1, nb*col]), + [i*nb, col]) + * tf.reshape( + tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), + [i*nb, col])) + x = tf.concat(axis=0, values=[xt, xinb, xina]) + + return x From e5a2e9460382c0fb9eb34ede81542e7919f3e394 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Tue, 31 Dec 2019 20:06:44 -0800 Subject: [PATCH 15/84] initial refactor, all tests pass --- brainiak/matnormal/covs.py | 309 +++++++----------- brainiak/matnormal/matnormal_likelihoods.py | 62 ++-- brainiak/matnormal/regression.py | 6 +- 
brainiak/matnormal/utils.py | 40 --- tests/matnormal/test_cov.py | 87 +++-- tests/matnormal/test_matnormal_logp.py | 6 +- .../test_matnormal_logp_conditional.py | 16 +- .../matnormal/test_matnormal_logp_marginal.py | 8 +- 8 files changed, 219 insertions(+), 315 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 223ed70b8..03b3eeac8 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -4,8 +4,8 @@ import scipy.linalg import scipy.sparse from tensorflow.contrib.distributions import InverseGamma, WishartCholesky -from brainiak.matnormal.utils import define_scope, xx_t -from brainiak.utils.utils import tf_solve_lower_triangular_kron,\ +from brainiak.matnormal.utils import x_tx, xx_t +from brainiak.utils.kronecker_solvers import tf_solve_lower_triangular_kron,\ tf_solve_upper_triangular_kron, \ tf_solve_lower_triangular_masked_kron, \ tf_solve_upper_triangular_masked_kron @@ -22,13 +22,16 @@ 'CovKroneckerFactored'] -class CovBase: +class CovBase(object): """Base metaclass for noise covariances """ __metaclass__ = abc.ABCMeta def __init__(self, size): self.size = size + + # Log-likelihood of this covariance (useful for regularization) + self.logp = tf.constant(0, dtype=tf.float64) @abc.abstractmethod def get_optimize_vars(self): @@ -37,59 +40,50 @@ def get_optimize_vars(self): """ pass - @abc.abstractproperty def logdet(self): """ log|Sigma| """ pass @abc.abstractmethod - def Sigma_inv_x(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + def solve(self, X): + """Given this covariance and some X, compute :math:`Sigma^{-1} * x` """ pass - @define_scope - def Sigma(self): - """return Sigma + @property + def _prec(self): + """Expose the precision explicitly (mostly for testing / + visualization) """ - return tf.matrix_inverse(self.Sigma_inv) + return self.solve(tf.eye(self.size, dtype=tf.float64)) - @define_scope - def Sigma_inv(self): - """ Sigma^{-1}. 
Override me with more efficient implementation in subclasses + @property + def _cov(self): + """Expose the covariance explicitly (mostly for testing / + visualization) """ - return self.Sigma_inv_x(tf.diag(tf.ones([self.size], - dtype=tf.float64))) + return tf.linalg.inv(self._prec) - @define_scope - def logp(self): - """ Log-likelihood of this covariance (useful for regularization) - """ - return tf.constant(0, dtype=tf.float64) +# class CovTFWrap(CovBase): +# """ thin wrapper around a TF tensor +# """ +# def __init__(self, Sigma): -class CovTFWrap(CovBase): - """ thin wrapper around a TF tensor - """ - def __init__(self, Sigma): +# self.L = tf.cholesky(Sigma) +# self.logdet = 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) - self.L = tf.cholesky(Sigma) +# def get_optimize_vars(self): +# """ Returns a list of tf variables that need to get optimized to fit +# this covariance +# """ +# return [] - def get_optimize_vars(self): - """ Returns a list of tf variables that need to get optimized to fit - this covariance - """ - return [] - - def Sigma_inv_x(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` - """ - return tf.cholesky_solve(self.L, X) - - @define_scope - def logdet(self): - return 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) +# def solve(self, X): +# """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` +# """ +# return tf.cholesky_solve(self.L, X) class CovIdentity(CovBase): @@ -97,6 +91,7 @@ class CovIdentity(CovBase): """ def __init__(self, size): super(CovIdentity, self).__init__(size) + self.logdet = tf.constant(0.0, 'float64') def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to @@ -104,29 +99,19 @@ def get_optimize_vars(self): """ return [] - @define_scope - def logdet(self): - """ log|Sigma| - """ - return tf.constant(0.0, 'float64') - - def Sigma_inv_x(self, X): + def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ return X - @define_scope - def Sigma_inv(self): - """ Sigma^{-1}. - """ - return tf.diag(tf.ones([self.size], dtype=tf.float64)) - class CovAR1(CovBase): """AR1 covariance """ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): + super(CovAR1, self).__init__(size) + # Similar to BRSA trick I think if scan_onsets is None: self.run_sizes = [size] @@ -163,7 +148,25 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): else: self.rho_unc = tf.Variable(np.log(rho), name="rho") - super(CovAR1, self).__init__(size) + # make logdet, first unconstrain rho and sigma + rho = 2 * tf.sigmoid(self.rho_unc) - 1 + sigma = tf.exp(self.log_sigma) + # now compute logdet + self.logdet = tf.reduce_sum(2 * tf.constant(self.run_sizes, + dtype=tf.float64) * + tf.log(sigma) - tf.log(1 - tf.square(rho))) + + # precompute sigma_inv op + # Unlike BRSA we assume stationarity within block so no special case + # for first/last element of a block. This makes constructing this + # matrix easier. 
+ # reprsimil.BRSA says (I - rho1 * D + rho1**2 * F) / sigma**2 + + rho = 2 * tf.sigmoid(self.rho_unc) - 1 + sigma = tf.exp(self.log_sigma) + self.Sigma_inv = (self._identity_mat - rho * self.offdiag_template + rho**2 * + self.diag_template) / tf.square(sigma) + def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to @@ -171,35 +174,11 @@ def get_optimize_vars(self): """ return [self.rho_unc, self.log_sigma] - @define_scope - def logdet(self): - """ log|Sigma| - """ - rho = 2 * tf.sigmoid(self.rho_unc) - 1 - sigma = tf.exp(self.log_sigma) - - return tf.reduce_sum(2 * tf.constant(self.run_sizes, - dtype=tf.float64) * - tf.log(sigma) - tf.log(1 - tf.square(rho))) - - def Sigma_inv_x(self, X): + def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ return tf.matmul(self.Sigma_inv, X) - @define_scope - def Sigma_inv(self): - """ Sigma^{-1}. - Unlike BRSA we assume stationarity within block so no special case - for first/last element of a block. This makes constructing this - matrix easier. - reprsimil.BRSA says (I - rho1 * D + rho1**2 * F) / sigma**2 - """ - rho = 2 * tf.sigmoid(self.rho_unc) - 1 - sigma = tf.exp(self.log_sigma) - return (self._identity_mat - rho * self.offdiag_template + rho**2 * - self.diag_template) / tf.square(sigma) - class CovIsotropic(CovBase): """Scaled identity (isotropic) noise covariance. @@ -213,9 +192,9 @@ def __init__(self, size, sigma=None): else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") - @define_scope - def sigma(self): - return tf.exp(self.log_sigma) + self.logdet = self.size * self.log_sigma + + self.sigma = tf.exp(self.log_sigma) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -223,23 +202,12 @@ def get_optimize_vars(self): """ return [self.log_sigma] - @define_scope - def logdet(self): - """ log|Sigma| - """ - return self.size * self.log_sigma - def Sigma_inv_x(self, X): + def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ return X / self.sigma - @define_scope - def Sigma_inv(self): - """ Sigma^{-1}. - """ - return tf.diag(tf.ones([self.size], dtype=tf.float64)) / self.sigma - class CovDiagonal(CovBase): """Uncorrelated (diagonal) noise covariance @@ -252,13 +220,9 @@ def __init__(self, size, sigma=None): else: self.logprec = tf.Variable(np.log(1/sigma), name="log-precisions") - @define_scope - def prec(self): - return tf.exp(self.logprec) - - @define_scope - def prec_dimaugmented(self): - return tf.expand_dims(self.prec, -1) + self.logdet = -tf.reduce_sum(self.logprec) + self.prec = tf.exp(self.logprec) + self.prec_dimaugmented = tf.expand_dims(self.prec, -1) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -266,23 +230,11 @@ def get_optimize_vars(self): """ return [self.logprec] - @define_scope - def logdet(self): - """ log|Sigma| - """ - return -tf.reduce_sum(self.logprec) - - def Sigma_inv_x(self, X): + def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ return tf.multiply(self.prec_dimaugmented, X) - @define_scope - def Sigma_inv(self): - """ Sigma^{-1}. 
- """ - return tf.diag(tf.ones([self.size], dtype=tf.float64) * self.prec) - class CovDiagonalGammaPrior(CovDiagonal): """Uncorrelated (diagonal) noise covariance @@ -294,21 +246,31 @@ def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): dtype=tf.float64), rate=tf.constant(beta, dtype=tf.float64)) - @define_scope - def logp(self): - return tf.reduce_sum(self.ig.log_prob(self.prec)) + self.logp = tf.reduce_sum(self.ig.log_prob(self.prec)) class CovUnconstrainedCholesky(CovBase): """Unconstrained noise covariance parameterized in terms of its cholesky """ - def __init__(self, size, Sigma=None): + def __init__(self, size=None, Sigma=None): + + if size is None and Sigma is None: + raise RuntimeError("Must pass either Sigma or size") + + if size is not None and Sigma is not None: + raise RuntimeError("Must pass either Sigma or size but not both") + + if Sigma is not None: + size = Sigma.shape[0] + super(CovUnconstrainedCholesky, self).__init__(size) + if Sigma is None: self.L_full = tf.Variable(tf.random_normal([size, size], dtype=tf.float64), name="L_full", dtype="float64") + else: # in order to respect the Sigma we got passed in, we log the diag # which we will later exp. a little ugly but this @@ -318,25 +280,20 @@ def __init__(self, size, Sigma=None): self.L_full = tf.Variable(L, name="L_full", dtype="float64") - @define_scope - def L(self): - """ Zero out triu of L_full to get cholesky L. - This seems dumb but TF is smart enough to set the gradient to zero - for those elements, and the alternative (fill_lower_triangular from - contrib.distributions) is inefficient and recommends not doing the - packing (for now). - Also: to make the parameterization unique we exp the diagonal so - it's positive. - """ + + # Zero out triu of L_full to get cholesky L. + # This seems dumb but TF is smart enough to set the gradient to zero + # for those elements, and the alternative (fill_lower_triangular from + # contrib.distributions) is inefficient and recommends not doing the + # packing (for now). + # Also: to make the parameterization unique we exp the diagonal so + # it's positive. + L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) - return tf.matrix_set_diag(L_indeterminate, + self.L = tf.matrix_set_diag(L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate))) - @define_scope - def Sigma(self): - """ covariance - """ - return xx_t(self.L) + self.logdet = 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -344,20 +301,13 @@ def get_optimize_vars(self): """ return [self.L_full] - @define_scope - def logdet(self): - """ log|Sigma| using a cholesky solve - """ - return 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) - - def Sigma_inv_x(self, X): + def solve(self, X): """ Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using cholesky solve """ return tf.cholesky_solve(self.L, X) - class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): """Unconstrained noise covariance parameterized in terms of its cholesky factor. 
@@ -372,22 +322,19 @@ def __init__(self, size, Sigma=None): scale=tf.constant(1e5 * np.eye(size), dtype=tf.float64)) - @define_scope - def logp(self): - """ Log-likelihood of this covariance - """ - # l = self.wishartReg.log_prob(self.Sigma) - # l = tf.Print(l, [self.Sigma], 'sigma') - # l = tf.Print(l, [tf.self_adjoint_eigvals(self.L)], 'eigs') - return self.wishartReg.log_prob(self.Sigma) - + Sigma = xx_t(self.L) + self.logp = self.wishartReg.log_prob(Sigma) class CovUnconstrainedInvCholesky(CovBase): """Unconstrained noise covariance parameterized - in terms of its precision cholesky + in terms of its precision cholesky. Use this over the + regular cholesky unless you have a good reason not to, since + you save a solve on every step. """ def __init__(self, size, invSigma=None): + super(CovUnconstrainedInvCholesky, self).__init__(size) + if invSigma is None: self.Linv_full = tf.Variable(tf.random_normal([size, size], dtype=tf.float64), name="Linv_full") @@ -395,27 +342,17 @@ def __init__(self, size, invSigma=None): self.Linv_full = tf.Variable(np.linalg.cholesky(invSigma), name="Linv_full") - super(CovUnconstrainedInvCholesky, self).__init__(size) - - @define_scope - def Linv(self): - """ Zero out triu of L_full to get cholesky L. - This seems dumb but TF is smart enough to set the gradient to zero - for those elements, and the alternative (fill_lower_triangular from - contrib.distributions) is inefficient and recommends not doing the - packing (for now). - Also: to make the parameterization unique we log the diagonal so - it's positive. - """ + # Zero out triu of L_full to get cholesky L. + # This seems dumb but TF is smart enough to set the gradient to zero + # for those elements, and the alternative (fill_lower_triangular from + # contrib.distributions) is inefficient and recommends not doing the + # packing (for now). + # Also: to make the parameterization unique we log the diagonal so + # it's positive. L_indeterminate = tf.matrix_band_part(self.Linv_full, -1, 0) - return tf.matrix_set_diag(L_indeterminate, + self.Linv = tf.matrix_set_diag(L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate))) - - @define_scope - def Sigma(self): - """ cov - """ - return tf.matrix_inverse(self.Sigma_inv) + self.logdet = -2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -423,18 +360,12 @@ def get_optimize_vars(self): """ return [self.Linv_full] - @define_scope - def logdet(self): - """ log|Sigma| using a cholesky solve - """ - return -2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) - - def Sigma_inv_x(self, X): + def solve(self, X): """ Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using - cholesky solve + matmul (since we're parameterized by L_inv) """ - return tf.matmul(xx_t(self.Linv), X) + return tf.matmul(x_tx(self.Linv), X) class CovKroneckerFactored(CovBase): @@ -484,29 +415,21 @@ def __init__(self, sizes, Sigmas=None, mask=None): for i in range(self.nfactors)] self.mask = mask - @define_scope - def L(self): - """ Zero out triu of all factors in L_full to get cholesky L. - This seems dumb but TF is smart enough to set the gradient to - zero for those elements, and the alternative - (fill_lower_triangular from contrib.distributions) - is inefficient and recommends not doing the packing (for now). - Also: to make the parameterization unique we log the diagonal - so it's positive. 
- """ + # make a list of choleskys L_indeterminate = [tf.matrix_band_part(mat, -1, 0) for mat in self.L_full] - return [tf.matrix_set_diag(mat, tf.exp(tf.matrix_diag_part(mat))) + self.L = [tf.matrix_set_diag(mat, tf.exp(tf.matrix_diag_part(mat))) for mat in L_indeterminate] + self.logdet = self._make_logdet() + def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance """ return self.L_full - @define_scope - def logdet(self): + def _make_logdet(self): """ log|Sigma| using the diagonals of the cholesky factors. """ if self.mask is None: @@ -528,7 +451,7 @@ def logdet(self): logdetfinal = tf.reduce_sum(logdet) return (2.0*logdetfinal) - def Sigma_inv_x(self, X): + def solve(self, X): """ Given this Sigma and some X, compute Sigma^{-1} * x using traingular solves with the cholesky factors. Do 2 triangular solves - L L^T x = y as L z = y and L^T x = z diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index a036aa9f5..26fda1444 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -6,6 +6,9 @@ def _condition(X): + """ + Condition number, used for diagnostics + """ s = tf.svd(X, compute_uv=False) return tf.reduce_max(s)/tf.reduce_min(s) @@ -24,37 +27,40 @@ def solve_det_marginal(x, sigma, A, Q): + \log|Q| + \log|\Sigma| """ - # we care about condition number of i_qf + # For diagnostics, we want to check condition numbers + # of things we invert. This includes Q and Sigma, as well + # as the "lemma factor" for lack of a better definition if logging.getLogger().isEnabledFor(logging.DEBUG): - A = tf.Print(A, [_condition(Q.Sigma_inv + tf.matmul(A, - sigma.Sigma_inv_x(A), transpose_a=True))], - 'i_qf condition') - # since the sigmas expose only inverse, we invert their - # conditions to get what we want - A = tf.Print(A, [1/_condition(Q.Sigma_inv)], 'Q condition') - A = tf.Print(A, [1/_condition(sigma.Sigma_inv)], 'sigma condition') + logging.log("Printing diagnostics for solve_det_marginal") + A = tf.Print(A, [_condition(Q._prec + tf.matmul(A, + sigma.solve(A), transpose_a=True))], + 'lemma_factor condition') + A = tf.Print(A, [_condition(Q._cov)], 'Q condition') + A = tf.Print(A, [_condition(sigma._cov)], 'sigma condition') A = tf.Print(A, [tf.reduce_max(A), tf.reduce_min(A)], 'A minmax') - # cholesky of (Qinv + A' Sigma^{-1} A) - i_qf_cholesky = tf.cholesky(Q.Sigma_inv + tf.matmul(A, - sigma.Sigma_inv_x(A), transpose_a=True)) + # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like + # a schur complement by isn't, so we call it the "lemma factor" + # since we use it in woodbury and matrix determinant lemmas + lemma_factor = tf.cholesky(Q._prec + tf.matmul(A, + sigma.solve(A), transpose_a=True)) logdet = Q.logdet + sigma.logdet +\ - 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(i_qf_cholesky))) + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) if logging.getLogger().isEnabledFor(logging.DEBUG): logdet = tf.Print(logdet, [Q.logdet], 'Q logdet') logdet = tf.Print(logdet, [sigma.logdet], 'sigma logdet') logdet = tf.Print(logdet, [2 * tf.reduce_sum(tf.log( - tf.matrix_diag_part(i_qf_cholesky)))], + tf.matrix_diag_part(lemma_factor)))], 'iqf logdet') # A' Sigma^{-1} - Atrp_Sinv = tf.matmul(A, sigma.Sigma_inv, transpose_a=True) + Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Qinv + A' Sigma^{-1} A)^{-1} A' Sigma^{-1} - prod_term = tf.cholesky_solve(i_qf_cholesky, Atrp_Sinv) + prod_term = 
tf.cholesky_solve(lemma_factor, Atrp_Sinv) - solve = tf.matmul(sigma.Sigma_inv_x(scaled_I(1.0, sigma.size) - + solve = tf.matmul(sigma.solve(scaled_I(1.0, sigma.size) - tf.matmul(A, prod_term)), x) return solve, logdet @@ -75,18 +81,18 @@ def solve_det_conditional(x, sigma, A, Q): """ # (Q - A' Sigma^{-1} A) - i_qf_cholesky = tf.cholesky(Q.Sigma - tf.matmul(A, - sigma.Sigma_inv_x(A), transpose_a=True)) + lemma_factor = tf.cholesky(Q._cov - tf.matmul(A, + sigma.solve(A), transpose_a=True)) logdet = -Q.logdet + sigma.logdet +\ - 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(i_qf_cholesky))) + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) # A' Sigma^{-1} - Atrp_Sinv = tf.matmul(A, sigma.Sigma_inv, transpose_a=True) + Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Q - A' Sigma^{-1} A)^{-1} A' Sigma^{-1} - prod_term = tf.cholesky_solve(i_qf_cholesky, Atrp_Sinv) + prod_term = tf.cholesky_solve(lemma_factor, Atrp_Sinv) - solve = tf.matmul(sigma.Sigma_inv_x(scaled_I(1.0, sigma.size) + + solve = tf.matmul(sigma.solve(scaled_I(1.0, sigma.size) + tf.matmul(A, prod_term)), x) return solve, logdet @@ -119,11 +125,11 @@ def matnorm_logp(x, row_cov, col_cov): colsize = tf.cast(tf.shape(x)[1], 'float64') # precompute sigma_col^{-1} * x' - solve_col = col_cov.Sigma_inv_x(tf.transpose(x)) + solve_col = col_cov.solve(tf.transpose(x)) logdet_col = col_cov.logdet # precompute sigma_row^{-1} * x - solve_row = row_cov.Sigma_inv_x(x) + solve_row = row_cov.solve(x) logdet_row = row_cov.logdet return _mnorm_logp_internal(colsize, rowsize, logdet_row, @@ -145,7 +151,7 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): rowsize = tf.cast(tf.shape(x)[0], 'float64') colsize = tf.cast(tf.shape(x)[1], 'float64') - solve_col = col_cov.Sigma_inv_x(tf.transpose(x)) + solve_col = col_cov.solve(tf.transpose(x)) logdet_col = col_cov.logdet solve_row, logdet_row = solve_det_marginal(x, row_cov, marg, @@ -169,7 +175,7 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): rowsize = tf.cast(tf.shape(x)[0], 'float64') colsize = tf.cast(tf.shape(x)[1], 'float64') - solve_row = row_cov.Sigma_inv_x(x) + solve_row = row_cov.solve(x) logdet_row = row_cov.logdet solve_col, logdet_col = solve_det_marginal(tf.transpose(x), @@ -197,7 +203,7 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): rowsize = tf.cast(tf.shape(x)[0], 'float64') colsize = tf.cast(tf.shape(x)[1], 'float64') - solve_col = col_cov.Sigma_inv_x(tf.transpose(x)) + solve_col = col_cov.solve(tf.transpose(x)) logdet_col = col_cov.logdet solve_row, logdet_row = solve_det_conditional(x, row_cov, cond, @@ -222,7 +228,7 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): rowsize = tf.cast(tf.shape(x)[0], 'float64') colsize = tf.cast(tf.shape(x)[1], 'float64') - solve_row = row_cov.Sigma_inv_x(x) + solve_row = row_cov.solve(x) logdet_row = row_cov.logdet solve_col, logdet_col = solve_det_conditional(tf.transpose(x), diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 2c4bf0ec1..d7fb769fd 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -87,9 +87,9 @@ def fit(self, X, y): # initialize to the least squares solution (basically all # we need now is the cov) - sigma_inv_x = self.time_cov.Sigma_inv_x(self.X)\ + sigma_inv_x = self.time_cov.solve(self.X)\ .eval(session=self.sess, feed_dict=feed_dict) - sigma_inv_y = self.time_cov.Sigma_inv_x(self.Y)\ + sigma_inv_y = self.time_cov.solve(self.Y)\ 
.eval(session=self.sess, feed_dict=feed_dict) beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), @@ -142,7 +142,7 @@ def calibrate(self, Y): cannot decode.") # Sigma_s^{-1} B' - Sigma_s_btrp = self.space_cov.Sigma_inv_x(tf.transpose( + Sigma_s_btrp = self.space_cov.solve(tf.transpose( self.beta)) # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index c051fb1f0..eea64330a 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -1,4 +1,3 @@ -import functools # https://danijar.com/structuring-your-tensorflow-models/ import tensorflow as tf from scipy.stats import norm from numpy.linalg import cholesky @@ -9,45 +8,6 @@ def rmn(rowcov, colcov): Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))) return(cholesky(rowcov).dot(Z).dot(cholesky(colcov))) - -def doublewrap(function): - """ - A decorator decorator, allowing to use the decorator to be used without - parentheses if not arguments are provided. All arguments must be optional. - """ - @functools.wraps(function) - def decorator(*args, **kwargs): - if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): - return function(args[0]) - else: - return lambda wrapee: function(wrapee, *args, **kwargs) - return decorator - - -@doublewrap -def define_scope(function, scope=None, *args, **kwargs): - """ - A decorator for functions that define TensorFlow operations. The wrapped - function will only be executed once. Subsequent calls to it will directly - return the result so that operations are added to the graph only once. - The operations added by the function live within a tf.variable_scope(). If - this decorator is used with arguments, they will be forwarded to the - variable scope. The scope name defaults to the name of the wrapped - function. 
- """ - attribute = '_cache_' + function.__name__ - name = scope or function.__name__ - - @property - @functools.wraps(function) - def decorator(self): - if not hasattr(self, attribute): - with tf.variable_scope(name, *args, **kwargs): - setattr(self, attribute, function(self)) - return getattr(self, attribute) - return decorator - - def xx_t(x): """ x * x' """ return tf.matmul(x, x, transpose_b=True) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 342caed1a..724a353f3 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -1,6 +1,6 @@ import numpy as np from numpy.testing import assert_allclose -from scipy.stats import norm, wishart, invgamma +from scipy.stats import norm, wishart, invgamma, invwishart from brainiak.matnormal.covs import (CovIdentity, CovAR1, CovIsotropic, @@ -28,7 +28,8 @@ def logdet_sinv_np(X, sigma): # logdet - _, logdet_np = np.linalg.slogdet(sigma) + sign, logdet = np.linalg.slogdet(sigma) + logdet_np = sign * logdet # sigma-inv sinv_np = np.linalg.inv(sigma) # solve @@ -57,21 +58,27 @@ def logdet_sinv_np_mask(X, sigma, mask): X_tf = tf.constant(X) A = norm.rvs(size=(m, p)) A_tf = tf.constant(A) +eye = tf.eye(m, dtype=tf.float64) def test_CovConstant(): cov_np = wishart.rvs(df=m+2, scale=np.eye(m)) - cov = CovUnconstrainedCholesky(m, cov_np) + cov = CovUnconstrainedCholesky(Sigma=cov_np) with tf.Session() as sess: # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) + + # verify what we pass is what we get + cov_tf = cov._cov.eval(session=sess) + assert_allclose(cov_tf, cov_np) + # compute the naive version logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -86,8 +93,8 @@ def test_CovIdentity(): cov_np = np.eye(m) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -102,8 +109,8 @@ def test_CovIsotropic(): cov_np = cov.sigma.eval(session=sess) * np.eye(cov.size) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -118,8 +125,8 @@ def test_CovDiagonal(): cov_np = np.diag(1/cov.prec.eval(session=sess)) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, 
cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -134,8 +141,8 @@ def test_CovDiagonal_initialized(): # compute the naive version logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -154,8 +161,8 @@ def test_CovDiagonalGammaPrior(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) penalty_np = np.sum(ig.logpdf(1/np.diag(cov_np))) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) assert_allclose(penalty_np, cov.logp.eval(session=sess), rtol=rtol) @@ -168,11 +175,12 @@ def test_CovUnconstrainedCholesky(): # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = cov.Sigma.eval(session=sess) + L = cov.L.eval(session=sess) + cov_np = L @ L.T logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -184,11 +192,14 @@ def test_CovUnconstrainedCholeskyWishartReg(): # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = cov.Sigma.eval(session=sess) + + L = cov.L.eval(session=sess) + cov_np = L @ L.T + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) # now compute the regularizer reg = wishart.logpdf(cov_np, df=m+2, scale=1e10 * np.eye(m)) @@ -197,17 +208,21 @@ def test_CovUnconstrainedCholeskyWishartReg(): def test_CovUnconstrainedInvCholesky(): - cov = CovUnconstrainedInvCholesky(size=m) + init = invwishart.rvs(scale=np.eye(m), df=m+2) + cov = CovUnconstrainedInvCholesky(size=m, invSigma=init) with tf.Session() as sess: # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = cov.Sigma.eval(session=sess) + Linv = cov.Linv.eval(session=sess) + L = np.linalg.inv(Linv) + cov_np = L @ L.T + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), 
rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -233,8 +248,8 @@ def test_Cov2FactorKron(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -258,8 +273,8 @@ def test_Cov3FactorKron(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -292,9 +307,9 @@ def test_Cov3FactorMaskedKron(): assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol, atol=atol) - assert_allclose(sinv_np, cov.Sigma_inv.eval(session=sess)[ + assert_allclose(sinv_np, cov.solve(eye).eval(session=sess)[ np.ix_(mask_indices, mask_indices)], rtol=rtol, atol=atol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess)[ + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess)[ mask_indices, :], rtol=rtol, atol=atol) @@ -306,11 +321,11 @@ def test_CovAR1(): # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = np.linalg.inv(cov.Sigma_inv.eval(session=sess)) + cov_np = np.linalg.inv(cov.solve(eye).eval(session=sess)) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) @@ -322,9 +337,9 @@ def test_CovAR1_scan_onsets(): # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = np.linalg.inv(cov.Sigma_inv.eval(session=sess)) + cov_np = np.linalg.inv(cov.solve(eye).eval(session=sess)) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index db39166ce..dbf07ff3c 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -17,7 +17,6 @@ rtol = 1e-7 - def test_against_scipy_mvn_row(): with tf.Session() as sess: @@ -29,7 +28,8 @@ def test_against_scipy_mvn_row(): sess.run(tf.global_variables_initializer()) - rowcov_np = rowcov.Sigma.eval(session=sess) + + rowcov_np = rowcov._cov.eval(session=sess) scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) @@ -48,7 +48,7 @@ def test_against_scipy_mvn_col(): sess.run(tf.global_variables_initializer()) - colcov_np = colcov.Sigma.eval(session=sess) + colcov_np = colcov._cov.eval(session=sess) scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py 
b/tests/matnormal/test_matnormal_logp_conditional.py index 0c4d549bf..1ab7579ab 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -27,12 +27,12 @@ def test_against_scipy_mvn_col_conditional(): cov_np = wishart.rvs(df=m+p+2, scale=np.eye(m+p)) # rowcov = CovConstant(cov_np[0:m, 0:m]) - rowcov = CovUnconstrainedCholesky(size=m, Sigma=cov_np[0:m, 0:m]) + rowcov = CovUnconstrainedCholesky(Sigma=cov_np[0:m, 0:m]) A = cov_np[0:m, m:] colcov = CovIdentity(size=n) - Q = CovUnconstrainedCholesky(size=p, Sigma=cov_np[m:, m:]) + Q = CovUnconstrainedCholesky(Sigma=cov_np[m:, m:]) X = rmn(np.eye(m), np.eye(n)) @@ -43,9 +43,9 @@ def test_against_scipy_mvn_col_conditional(): sess.run(tf.global_variables_initializer()) - Q_np = Q.Sigma.eval(session=sess) + Q_np = Q._cov.eval(session=sess) - rowcov_np = rowcov.Sigma.eval(session=sess) - \ + rowcov_np = rowcov._cov.eval(session=sess) - \ A.dot(np.linalg.inv(Q_np)).dot((A.T)) scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), @@ -62,10 +62,10 @@ def test_against_scipy_mvn_row_conditional(): cov_np = wishart.rvs(df=m+p+2, scale=np.eye(m+p)) rowcov = CovIdentity(size=m) - colcov = CovUnconstrainedCholesky(size=n, Sigma=cov_np[0:n, 0:n]) + colcov = CovUnconstrainedCholesky(Sigma=cov_np[0:n, 0:n]) A = cov_np[n:, 0:n] - Q = CovUnconstrainedCholesky(size=p, Sigma=cov_np[n:, n:]) + Q = CovUnconstrainedCholesky(Sigma=cov_np[n:, n:]) X = rmn(np.eye(m), np.eye(n)) @@ -76,9 +76,9 @@ def test_against_scipy_mvn_row_conditional(): sess.run(tf.global_variables_initializer()) - Q_np = Q.Sigma.eval(session=sess) + Q_np = Q._cov.eval(session=sess) - colcov_np = colcov.Sigma.eval(session=sess) - \ + colcov_np = colcov._cov.eval(session=sess) - \ A.T.dot(np.linalg.inv(Q_np)).dot((A)) scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index a9cc43dc2..ba1b1bc86 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -37,9 +37,9 @@ def test_against_scipy_mvn_row_marginal(): sess.run(tf.global_variables_initializer()) - Q_np = Q.Sigma.eval(session=sess) + Q_np = Q._cov.eval(session=sess) - rowcov_np = rowcov.Sigma.eval(session=sess) + A.dot(Q_np).dot(A.T) + rowcov_np = rowcov._cov.eval(session=sess) + A.dot(Q_np).dot(A.T) scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) @@ -65,9 +65,9 @@ def test_against_scipy_mvn_col_marginal(): sess.run(tf.global_variables_initializer()) - Q_np = Q.Sigma.eval(session=sess) + Q_np = Q._cov.eval(session=sess) - colcov_np = colcov.Sigma.eval(session=sess) + A.T.dot(Q_np).dot(A) + colcov_np = colcov._cov.eval(session=sess) + A.T.dot(Q_np).dot(A) scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) From bb57f74f2ac301b0d6dc2bb0855eede4afbef56a Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Tue, 31 Dec 2019 20:07:51 -0800 Subject: [PATCH 16/84] remove CovTFWrap, use constant cholesky cov with passed Sigma instead --- brainiak/matnormal/covs.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 03b3eeac8..788bd1530 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -66,26 +66,6 @@ def _cov(self): return tf.linalg.inv(self._prec) -# class CovTFWrap(CovBase): -# """ thin wrapper around a TF tensor -# """ -# 
def __init__(self, Sigma): - -# self.L = tf.cholesky(Sigma) -# self.logdet = 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) - -# def get_optimize_vars(self): -# """ Returns a list of tf variables that need to get optimized to fit -# this covariance -# """ -# return [] - -# def solve(self, X): -# """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` -# """ -# return tf.cholesky_solve(self.L, X) - - class CovIdentity(CovBase): """Identity noise covariance. """ From 1ee89b3b276e0c7da3661ae7e7d542185d7ceddf Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 1 Jan 2020 13:56:23 -0500 Subject: [PATCH 17/84] linter and formatting fixes --- brainiak/matnormal/covs.py | 231 ++++++++++-------- brainiak/matnormal/matnormal_likelihoods.py | 166 +++++++------ brainiak/matnormal/mnrsa.py | 66 ++--- brainiak/matnormal/regression.py | 44 ++-- brainiak/matnormal/utils.py | 3 +- brainiak/utils/kronecker_solvers.py | 8 +- brainiak/utils/utils.py | 1 - tests/matnormal/test_cov.py | 142 ++++++----- tests/matnormal/test_matnormal_logp.py | 15 +- .../test_matnormal_logp_conditional.py | 29 +-- .../matnormal/test_matnormal_logp_marginal.py | 28 +-- tests/matnormal/test_matnormal_regression.py | 24 +- tests/matnormal/test_matnormal_rsa.py | 30 +-- 13 files changed, 433 insertions(+), 354 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 788bd1530..5e1f97f4f 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -5,31 +5,36 @@ import scipy.sparse from tensorflow.contrib.distributions import InverseGamma, WishartCholesky from brainiak.matnormal.utils import x_tx, xx_t -from brainiak.utils.kronecker_solvers import tf_solve_lower_triangular_kron,\ - tf_solve_upper_triangular_kron, \ - tf_solve_lower_triangular_masked_kron, \ - tf_solve_upper_triangular_masked_kron - -__all__ = ['CovBase', - 'CovIdentity', - 'CovAR1', - 'CovIsotropic', - 'CovDiagonal', - 'CovDiagonalGammaPrior', - 'CovUnconstrainedCholesky', - 'CovUnconstrainedCholeskyWishartReg', - 'CovUnconstrainedInvCholesky', - 'CovKroneckerFactored'] +from brainiak.utils.kronecker_solvers import ( + tf_solve_lower_triangular_kron, + tf_solve_upper_triangular_kron, + tf_solve_lower_triangular_masked_kron, + tf_solve_upper_triangular_masked_kron, +) + +__all__ = [ + "CovBase", + "CovIdentity", + "CovAR1", + "CovIsotropic", + "CovDiagonal", + "CovDiagonalGammaPrior", + "CovUnconstrainedCholesky", + "CovUnconstrainedCholeskyWishartReg", + "CovUnconstrainedInvCholesky", + "CovKroneckerFactored", +] class CovBase(object): """Base metaclass for noise covariances """ + __metaclass__ = abc.ABCMeta def __init__(self, size): self.size = size - + # Log-likelihood of this covariance (useful for regularization) self.logp = tf.constant(0, dtype=tf.float64) @@ -53,14 +58,14 @@ def solve(self, X): @property def _prec(self): - """Expose the precision explicitly (mostly for testing / + """Expose the precision explicitly (mostly for testing / visualization) """ return self.solve(tf.eye(self.size, dtype=tf.float64)) @property def _cov(self): - """Expose the covariance explicitly (mostly for testing / + """Expose the covariance explicitly (mostly for testing / visualization) """ return tf.linalg.inv(self._prec) @@ -69,9 +74,10 @@ def _cov(self): class CovIdentity(CovBase): """Identity noise covariance. 
""" + def __init__(self, size): super(CovIdentity, self).__init__(size) - self.logdet = tf.constant(0.0, 'float64') + self.logdet = tf.constant(0.0, "float64") def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to @@ -88,6 +94,7 @@ def solve(self, X): class CovAR1(CovBase): """AR1 covariance """ + def __init__(self, size, rho=None, sigma=None, scan_onsets=None): super(CovAR1, self).__init__(size) @@ -95,36 +102,40 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): # Similar to BRSA trick I think if scan_onsets is None: self.run_sizes = [size] - self.offdiag_template = tf.constant(scipy.linalg.toeplitz(np.r_[0, - 1, np.zeros(size-2)]), - dtype=tf.float64) - self.diag_template = tf.constant(np.diag(np.r_[0, - np.ones(size-2), - 0])) + self.offdiag_template = tf.constant( + scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)]), + dtype=tf.float64 + ) + self.diag_template = tf.constant(np.diag( + np.r_[0, np.ones(size - 2), 0])) else: self.run_sizes = np.ediff1d(np.r_[scan_onsets, size]) - sub_offdiags = [scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(r-2)]) - for r in self.run_sizes] - self.offdiag_template = tf.constant(scipy.sparse. - block_diag(sub_offdiags) - .toarray()) - subdiags = [np.diag(np.r_[0, np.ones(r-2), 0]) + sub_offdiags = [ + scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(r - 2)]) + for r in self.run_sizes + ] + self.offdiag_template = tf.constant( + scipy.sparse.block_diag(sub_offdiags).toarray() + ) + subdiags = [np.diag(np.r_[0, np.ones(r - 2), 0]) for r in self.run_sizes] - self.diag_template = tf.constant(scipy.sparse. - block_diag(subdiags) - .toarray()) + self.diag_template = tf.constant( + scipy.sparse.block_diag(subdiags).toarray() + ) self._identity_mat = tf.constant(np.eye(size)) if sigma is None: - self.log_sigma = tf.Variable(tf.random_normal([1], - dtype=tf.float64), name="sigma") + self.log_sigma = tf.Variable( + tf.random_normal([1], dtype=tf.float64), name="sigma" + ) else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") if rho is None: - self.rho_unc = tf.Variable(tf.random_normal([1], dtype=tf.float64), - name="rho") + self.rho_unc = tf.Variable( + tf.random_normal([1], dtype=tf.float64), name="rho" + ) else: self.rho_unc = tf.Variable(np.log(rho), name="rho") @@ -132,9 +143,10 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) # now compute logdet - self.logdet = tf.reduce_sum(2 * tf.constant(self.run_sizes, - dtype=tf.float64) * - tf.log(sigma) - tf.log(1 - tf.square(rho))) + self.logdet = tf.reduce_sum( + 2 * tf.constant(self.run_sizes, dtype=tf.float64) * tf.log(sigma) + - tf.log(1 - tf.square(rho)) + ) # precompute sigma_inv op # Unlike BRSA we assume stationarity within block so no special case @@ -144,9 +156,11 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) - self.Sigma_inv = (self._identity_mat - rho * self.offdiag_template + rho**2 * - self.diag_template) / tf.square(sigma) - + self.Sigma_inv = ( + self._identity_mat + - rho * self.offdiag_template + + rho ** 2 * self.diag_template + ) / tf.square(sigma) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to @@ -167,8 +181,9 @@ class CovIsotropic(CovBase): def __init__(self, size, sigma=None): super(CovIsotropic, self).__init__(size) if sigma is None: - self.log_sigma = tf.Variable(tf.random_normal([1], - 
dtype=tf.float64), name="sigma") + self.log_sigma = tf.Variable( + tf.random_normal([1], dtype=tf.float64), name="sigma" + ) else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") @@ -182,7 +197,6 @@ def get_optimize_vars(self): """ return [self.log_sigma] - def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ @@ -192,13 +206,16 @@ def solve(self, X): class CovDiagonal(CovBase): """Uncorrelated (diagonal) noise covariance """ + def __init__(self, size, sigma=None): super(CovDiagonal, self).__init__(size) if sigma is None: - self.logprec = tf.Variable(tf.random_normal([size], - dtype=tf.float64), name="precisions") + self.logprec = tf.Variable( + tf.random_normal([size], dtype=tf.float64), name="precisions" + ) else: - self.logprec = tf.Variable(np.log(1/sigma), name="log-precisions") + self.logprec = tf.Variable( + np.log(1 / sigma), name="log-precisions") self.logdet = -tf.reduce_sum(self.logprec) self.prec = tf.exp(self.logprec) @@ -219,12 +236,14 @@ def solve(self, X): class CovDiagonalGammaPrior(CovDiagonal): """Uncorrelated (diagonal) noise covariance """ + def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): super(CovDiagonalGammaPrior, self).__init__(size, sigma) - self.ig = InverseGamma(concentration=tf.constant(alpha, - dtype=tf.float64), - rate=tf.constant(beta, dtype=tf.float64)) + self.ig = InverseGamma( + concentration=tf.constant(alpha, dtype=tf.float64), + rate=tf.constant(beta, dtype=tf.float64), + ) self.logp = tf.reduce_sum(self.ig.log_prob(self.prec)) @@ -234,7 +253,7 @@ class CovUnconstrainedCholesky(CovBase): """ def __init__(self, size=None, Sigma=None): - + if size is None and Sigma is None: raise RuntimeError("Must pass either Sigma or size") @@ -245,21 +264,21 @@ def __init__(self, size=None, Sigma=None): size = Sigma.shape[0] super(CovUnconstrainedCholesky, self).__init__(size) - + if Sigma is None: - self.L_full = tf.Variable(tf.random_normal([size, size], - dtype=tf.float64), - name="L_full", dtype="float64") - + self.L_full = tf.Variable( + tf.random_normal([size, size], dtype=tf.float64), + name="L_full", + dtype="float64", + ) + else: # in order to respect the Sigma we got passed in, we log the diag # which we will later exp. a little ugly but this # is a rare use case L = np.linalg.cholesky(Sigma) L[np.diag_indices_from(L)] = np.log(np.diag(L)) - self.L_full = tf.Variable(L, name="L_full", - dtype="float64") - + self.L_full = tf.Variable(L, name="L_full", dtype="float64") # Zero out triu of L_full to get cholesky L. # This seems dumb but TF is smart enough to set the gradient to zero @@ -268,10 +287,11 @@ def __init__(self, size=None, Sigma=None): # packing (for now). # Also: to make the parameterization unique we exp the diagonal so # it's positive. - - L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) - self.L = tf.matrix_set_diag(L_indeterminate, - tf.exp(tf.matrix_diag_part(L_indeterminate))) + + L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) + self.L = tf.matrix_set_diag( + L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) + ) self.logdet = 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) @@ -288,6 +308,7 @@ def solve(self, X): """ return tf.cholesky_solve(self.L, X) + class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): """Unconstrained noise covariance parameterized in terms of its cholesky factor. 
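The `CovKroneckerFactored` covariance reformatted in the next hunk never materializes kron(L[0], L[1], ...); its `solve` and `logdet` work factor-by-factor through the kronecker_solvers module added earlier in this series. A minimal NumPy sketch of the identity behind the factor-wise triangular solve (illustrative only, not part of the patch):

import numpy as np
from scipy.linalg import solve_triangular
from scipy.stats import wishart

rng = np.random.RandomState(1)
na, nb = 3, 4
L0 = np.linalg.cholesky(wishart.rvs(df=na + 2, scale=np.eye(na), random_state=rng))
L1 = np.linalg.cholesky(wishart.rvs(df=nb + 2, scale=np.eye(nb), random_state=rng))
y = rng.randn(na * nb)

# reference: form kron(L0, L1) explicitly and do one big triangular solve
x_direct = solve_triangular(np.kron(L0, L1), y, lower=True)

# factor-wise: kron(L0, L1) @ vec(X) = vec(L1 @ X @ L0.T) for column-major vec,
# so the big solve reduces to two small triangular solves
Y = y.reshape(nb, na, order="F")
Z = solve_triangular(L1, Y, lower=True)          # L1^{-1} Y
X = solve_triangular(L0, Z.T, lower=True).T      # Z L0^{-T}
assert np.allclose(x_direct, X.reshape(-1, order="F"))
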
@@ -297,30 +318,32 @@ class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): def __init__(self, size, Sigma=None): super(CovUnconstrainedCholeskyWishartReg, self).__init__(size) - self.wishartReg = WishartCholesky(df=tf.constant(size+2, - dtype=tf.float64), - scale=tf.constant(1e5 * np.eye(size), - dtype=tf.float64)) + self.wishartReg = WishartCholesky( + df=tf.constant(size + 2, dtype=tf.float64), + scale=tf.constant(1e5 * np.eye(size), dtype=tf.float64), + ) Sigma = xx_t(self.L) self.logp = self.wishartReg.log_prob(Sigma) + class CovUnconstrainedInvCholesky(CovBase): """Unconstrained noise covariance parameterized - in terms of its precision cholesky. Use this over the + in terms of its precision cholesky. Use this over the regular cholesky unless you have a good reason not to, since - you save a solve on every step. + you save a solve on every step. """ def __init__(self, size, invSigma=None): super(CovUnconstrainedInvCholesky, self).__init__(size) - + if invSigma is None: - self.Linv_full = tf.Variable(tf.random_normal([size, size], - dtype=tf.float64), name="Linv_full") + self.Linv_full = tf.Variable( + tf.random_normal([size, size], dtype=tf.float64), + name="Linv_full") else: - self.Linv_full = tf.Variable(np.linalg.cholesky(invSigma), - name="Linv_full") + self.Linv_full = tf.Variable( + np.linalg.cholesky(invSigma), name="Linv_full") # Zero out triu of L_full to get cholesky L. # This seems dumb but TF is smart enough to set the gradient to zero @@ -329,10 +352,12 @@ def __init__(self, size, invSigma=None): # packing (for now). # Also: to make the parameterization unique we log the diagonal so # it's positive. - L_indeterminate = tf.matrix_band_part(self.Linv_full, -1, 0) - self.Linv = tf.matrix_set_diag(L_indeterminate, - tf.exp(tf.matrix_diag_part(L_indeterminate))) - self.logdet = -2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) + L_indeterminate = tf.linalg.band_part(self.Linv_full, -1, 0) + self.Linv = tf.matrix_set_diag( + L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) + ) + self.logdet = -2 * \ + tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -379,27 +404,35 @@ def __init__(self, sizes, Sigmas=None, mask=None): If sizes is not a list """ if not isinstance(sizes, list): - raise TypeError('sizes is not a list') + raise TypeError("sizes is not a list") self.sizes = sizes self.nfactors = len(sizes) self.size = np.prod(np.array(sizes), dtype=np.int32) if Sigmas is None: - self.L_full = [tf.Variable(tf.random_normal([sizes[i], sizes[i]], - dtype=tf.float64), name="L"+str(i)+"_full") - for i in range(self.nfactors)] + self.L_full = [ + tf.Variable( + tf.random_normal([sizes[i], sizes[i]], dtype=tf.float64), + name="L" + str(i) + "_full", + ) + for i in range(self.nfactors) + ] else: - self.L_full = [tf.Variable(np.linalg.cholesky(Sigmas[i]), - name="L"+str(i)+"_full") - for i in range(self.nfactors)] + self.L_full = [ + tf.Variable(np.linalg.cholesky( + Sigmas[i]), name="L" + str(i) + "_full") + for i in range(self.nfactors) + ] self.mask = mask # make a list of choleskys - L_indeterminate = [tf.matrix_band_part(mat, -1, 0) - for mat in self.L_full] - self.L = [tf.matrix_set_diag(mat, tf.exp(tf.matrix_diag_part(mat))) - for mat in L_indeterminate] + L_indeterminate = [tf.linalg.band_part( + mat, -1, 0) for mat in self.L_full] + self.L = [ + tf.matrix_set_diag(mat, tf.exp(tf.matrix_diag_part(mat))) + for mat in L_indeterminate + ] 
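The log-determinant computed just below relies on the standard Kronecker identity log|S1 kron S2| = n2 * log|S1| + n1 * log|S2| (with Si of size ni by ni), applied to the per-factor Cholesky diagonals. A minimal NumPy check, standalone, with small sizes and Wishart draws that are illustration values only:

    import numpy as np
    from scipy.stats import wishart

    # two small SPD factors; sizes and seeds are illustration values only
    S1 = wishart.rvs(df=5, scale=np.eye(3), random_state=0)
    S2 = wishart.rvs(df=6, scale=np.eye(4), random_state=1)

    direct = np.linalg.slogdet(np.kron(S1, S2))[1]

    # per-factor log-determinants from the Cholesky diagonals,
    # each weighted by the size of the other factor
    L1, L2 = np.linalg.cholesky(S1), np.linalg.cholesky(S2)
    factored = 2 * (S2.shape[0] * np.log(np.diag(L1)).sum()
                    + S1.shape[0] * np.log(np.diag(L2)).sum())

    assert np.allclose(direct, factored)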
self.logdet = self._make_logdet() @@ -416,9 +449,10 @@ def _make_logdet(self): n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in self.L]) n_prod = tf.reduce_prod(n_list) - logdet = tf.stack([tf.reduce_sum(tf.log(tf.diag_part(mat))) - for mat in self.L]) - logdetfinal = tf.reduce_sum((logdet*n_prod)/n_list) + logdet = tf.stack( + [tf.reduce_sum(tf.log(tf.diag_part(mat))) for mat in self.L] + ) + logdetfinal = tf.reduce_sum((logdet * n_prod) / n_list) else: n_list = [tf.shape(mat)[0] for mat in self.L] mask_reshaped = tf.reshape(self.mask, n_list) @@ -426,10 +460,11 @@ def _make_logdet(self): for i in range(self.nfactors): indices = list(range(self.nfactors)) indices.remove(i) - logdet += tf.log(tf.diag_part(self.L[i])) *\ - tf.to_double(tf.reduce_sum(mask_reshaped, indices)) + logdet += tf.log(tf.diag_part(self.L[i])) * tf.to_double( + tf.reduce_sum(mask_reshaped, indices) + ) logdetfinal = tf.reduce_sum(logdet) - return (2.0*logdetfinal) + return 2.0 * logdetfinal def solve(self, X): """ Given this Sigma and some X, compute Sigma^{-1} * x using diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 26fda1444..25cbf7e99 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -10,58 +10,64 @@ def _condition(X): Condition number, used for diagnostics """ s = tf.svd(X, compute_uv=False) - return tf.reduce_max(s)/tf.reduce_min(s) + return tf.reduce_max(s) / tf.reduce_min(s) def solve_det_marginal(x, sigma, A, Q): """ Use matrix inversion lemma for the solve: .. math:: - (\Sigma + AQA')^{-1} X =\\ - \Sigma^{-1} - \Sigma^{-1} A (Q^{-1} + - A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} + (\\Sigma + AQA')^{-1} X =\\ + \\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} + + A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1} Use matrix determinant lemma for determinant: .. math:: - \log|(\Sigma + AQA')| = \log|Q^{-1} + A' \Sigma^{-1} A| - + \log|Q| + \log|\Sigma| + \\log|(\\Sigma + AQA')| = \\log|Q^{-1} + A' \\Sigma^{-1} A| + + \\log|Q| + \\log|\\Sigma| """ - # For diagnostics, we want to check condition numbers + # For diagnostics, we want to check condition numbers # of things we invert. 
This includes Q and Sigma, as well # as the "lemma factor" for lack of a better definition if logging.getLogger().isEnabledFor(logging.DEBUG): logging.log("Printing diagnostics for solve_det_marginal") - A = tf.Print(A, [_condition(Q._prec + tf.matmul(A, - sigma.solve(A), transpose_a=True))], - 'lemma_factor condition') - A = tf.Print(A, [_condition(Q._cov)], 'Q condition') - A = tf.Print(A, [_condition(sigma._cov)], 'sigma condition') - A = tf.Print(A, [tf.reduce_max(A), tf.reduce_min(A)], 'A minmax') + A = tf.Print(A, [_condition(Q._prec + tf.matmul(A, sigma.solve(A), + transpose_a=True))], + "lemma_factor condition") + A = tf.Print(A, [_condition(Q._cov)], "Q condition") + A = tf.Print(A, [_condition(sigma._cov)], "sigma condition") + A = tf.Print(A, [tf.reduce_max(A), tf.reduce_min(A)], "A minmax") # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like # a schur complement by isn't, so we call it the "lemma factor" # since we use it in woodbury and matrix determinant lemmas - lemma_factor = tf.cholesky(Q._prec + tf.matmul(A, - sigma.solve(A), transpose_a=True)) + lemma_factor = tf.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), + transpose_a=True)) - logdet = Q.logdet + sigma.logdet +\ - 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) + logdet = ( + Q.logdet + + sigma.logdet + + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) + ) if logging.getLogger().isEnabledFor(logging.DEBUG): - logdet = tf.Print(logdet, [Q.logdet], 'Q logdet') - logdet = tf.Print(logdet, [sigma.logdet], 'sigma logdet') - logdet = tf.Print(logdet, [2 * tf.reduce_sum(tf.log( - tf.matrix_diag_part(lemma_factor)))], - 'iqf logdet') + logdet = tf.Print(logdet, [Q.logdet], "Q logdet") + logdet = tf.Print(logdet, [sigma.logdet], "sigma logdet") + logdet = tf.Print( + logdet, + [2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor)))], + "iqf logdet", + ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Qinv + A' Sigma^{-1} A)^{-1} A' Sigma^{-1} prod_term = tf.cholesky_solve(lemma_factor, Atrp_Sinv) - solve = tf.matmul(sigma.solve(scaled_I(1.0, sigma.size) - - tf.matmul(A, prod_term)), x) + solve = tf.matmul( + sigma.solve(scaled_I(1.0, sigma.size) - tf.matmul(A, prod_term)), x + ) return solve, logdet @@ -70,49 +76,54 @@ def solve_det_conditional(x, sigma, A, Q): """ Use matrix inversion lemma for the solve: .. math:: - (\Sigma - AQ^{-1}A')^{-1} X =\\ - \Sigma^{-1} + \Sigma^{-1} A (Q - - A' \Sigma^{-1} A)^{-1} A' \Sigma^{-1} X + (\\Sigma - AQ^{-1}A')^{-1} X =\\ + \\Sigma^{-1} + \\Sigma^{-1} A (Q - + A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1} X Use matrix determinant lemma for determinant: .. 
math:: - \log|(\Sigma - AQ^{-1}A')| = - \log|Q - A' \Sigma^{-1} A| - \log|Q| + \log|\Sigma| + \\log|(\\Sigma - AQ^{-1}A')| = + \\log|Q - A' \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma| """ # (Q - A' Sigma^{-1} A) - lemma_factor = tf.cholesky(Q._cov - tf.matmul(A, - sigma.solve(A), transpose_a=True)) + lemma_factor = tf.cholesky( + Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True)) - logdet = -Q.logdet + sigma.logdet +\ - 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) + logdet = ( + -Q.logdet + + sigma.logdet + + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) + ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Q - A' Sigma^{-1} A)^{-1} A' Sigma^{-1} prod_term = tf.cholesky_solve(lemma_factor, Atrp_Sinv) - solve = tf.matmul(sigma.solve(scaled_I(1.0, sigma.size) + - tf.matmul(A, prod_term)), x) + solve = tf.matmul( + sigma.solve(scaled_I(1.0, sigma.size) + tf.matmul(A, prod_term)), x + ) return solve, logdet -def _mnorm_logp_internal(colsize, rowsize, logdet_row, logdet_col, - solve_row, solve_col): +def _mnorm_logp_internal( + colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col +): """Construct logp from the solves and determinants. """ log2pi = 1.8378770664093453 if logging.getLogger().isEnabledFor(logging.DEBUG): - solve_row = tf.Print(solve_row, [tf.trace(solve_col)], 'coltrace') - solve_row = tf.Print(solve_row, [tf.trace(solve_row)], 'rowtrace') - solve_row = tf.Print(solve_row, [logdet_row], 'logdet_row') - solve_row = tf.Print(solve_row, [logdet_col], 'logdet_col') - - denominator = - rowsize * colsize * log2pi -\ - colsize * logdet_row - rowsize * logdet_col - numerator = - tf.trace(tf.matmul(solve_col, solve_row)) + solve_row = tf.Print(solve_row, [tf.trace(solve_col)], "coltrace") + solve_row = tf.Print(solve_row, [tf.trace(solve_row)], "rowtrace") + solve_row = tf.Print(solve_row, [logdet_row], "logdet_row") + solve_row = tf.Print(solve_row, [logdet_col], "logdet_col") + + denominator = (-rowsize * colsize * log2pi - + colsize * logdet_row - rowsize * logdet_col) + numerator = -tf.trace(tf.matmul(solve_col, solve_row)) return 0.5 * (numerator + denominator) @@ -121,8 +132,8 @@ def matnorm_logp(x, row_cov, col_cov): Assumes that row_cov and col_cov follow the API defined in CovBase. """ - rowsize = tf.cast(tf.shape(x)[0], 'float64') - colsize = tf.cast(tf.shape(x)[1], 'float64') + rowsize = tf.cast(tf.shape(x)[0], "float64") + colsize = tf.cast(tf.shape(x)[1], "float64") # precompute sigma_col^{-1} * x' solve_col = col_cov.solve(tf.transpose(x)) @@ -132,8 +143,9 @@ def matnorm_logp(x, row_cov, col_cov): solve_row = row_cov.solve(x) logdet_row = row_cov.logdet - return _mnorm_logp_internal(colsize, rowsize, logdet_row, - logdet_col, solve_row, solve_col) + return _mnorm_logp_internal( + colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col + ) def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): @@ -148,17 +160,17 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): This method exploits the matrix inversion and determinant lemmas to construct S + APA' given the covariance API in in CovBase. 
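Both lemmas used by solve_det_marginal are easy to verify numerically. A minimal NumPy sketch, standalone, where the sizes, seeds, and Wishart draws are illustrative assumptions only:

    import numpy as np
    from scipy.stats import wishart

    rng = np.random.RandomState(0)
    m, p = 6, 3  # illustrative sizes only
    Sigma = wishart.rvs(df=m + 2, scale=np.eye(m), random_state=1)
    Q = wishart.rvs(df=p + 2, scale=np.eye(p), random_state=2)
    A = rng.normal(size=(m, p))
    x = rng.normal(size=(m, 2))

    marg = Sigma + A @ Q @ A.T

    # matrix determinant lemma:
    # log|Sigma + A Q A'| = log|Q^-1 + A' Sigma^-1 A| + log|Q| + log|Sigma|
    lemma = (np.linalg.slogdet(np.linalg.inv(Q)
                               + A.T @ np.linalg.solve(Sigma, A))[1]
             + np.linalg.slogdet(Q)[1] + np.linalg.slogdet(Sigma)[1])
    assert np.allclose(np.linalg.slogdet(marg)[1], lemma)

    # matrix inversion (Woodbury) lemma for the solve
    inner = np.linalg.inv(np.linalg.inv(Q) + A.T @ np.linalg.solve(Sigma, A))
    woodbury = (np.linalg.solve(Sigma, x)
                - np.linalg.solve(Sigma, A @ inner @ A.T
                                  @ np.linalg.solve(Sigma, x)))
    assert np.allclose(np.linalg.solve(marg, x), woodbury)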
""" - rowsize = tf.cast(tf.shape(x)[0], 'float64') - colsize = tf.cast(tf.shape(x)[1], 'float64') + rowsize = tf.cast(tf.shape(x)[0], "float64") + colsize = tf.cast(tf.shape(x)[1], "float64") solve_col = col_cov.solve(tf.transpose(x)) logdet_col = col_cov.logdet - solve_row, logdet_row = solve_det_marginal(x, row_cov, marg, - marg_cov) + solve_row, logdet_row = solve_det_marginal(x, row_cov, marg, marg_cov) - return _mnorm_logp_internal(colsize, rowsize, logdet_row, - logdet_col, solve_row, solve_col) + return _mnorm_logp_internal( + colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col + ) def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): @@ -172,19 +184,19 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): This method exploits the matrix inversion and determinant lemmas to construct S + APA' given the covariance API in in CovBase. """ - rowsize = tf.cast(tf.shape(x)[0], 'float64') - colsize = tf.cast(tf.shape(x)[1], 'float64') + rowsize = tf.cast(tf.shape(x)[0], "float64") + colsize = tf.cast(tf.shape(x)[1], "float64") solve_row = row_cov.solve(x) logdet_row = row_cov.logdet - solve_col, logdet_col = solve_det_marginal(tf.transpose(x), - col_cov, - tf.transpose(marg), - marg_cov) + solve_col, logdet_col = solve_det_marginal( + tf.transpose(x), col_cov, tf.transpose(marg), marg_cov + ) - return _mnorm_logp_internal(colsize, rowsize, logdet_row, - logdet_col, solve_row, solve_col) + return _mnorm_logp_internal( + colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col + ) def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): @@ -200,17 +212,17 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): construct S - APA' given the covariance API in in CovBase. """ - rowsize = tf.cast(tf.shape(x)[0], 'float64') - colsize = tf.cast(tf.shape(x)[1], 'float64') + rowsize = tf.cast(tf.shape(x)[0], "float64") + colsize = tf.cast(tf.shape(x)[1], "float64") solve_col = col_cov.solve(tf.transpose(x)) logdet_col = col_cov.logdet - solve_row, logdet_row = solve_det_conditional(x, row_cov, cond, - cond_cov) + solve_row, logdet_row = solve_det_conditional(x, row_cov, cond, cond_cov) - return _mnorm_logp_internal(colsize, rowsize, logdet_row, - logdet_col, solve_row, solve_col) + return _mnorm_logp_internal( + colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col + ) def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): @@ -225,16 +237,16 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): This method exploits the matrix inversion and determinant lemmas to construct S - APA' given the covariance API in in CovBase. 
""" - rowsize = tf.cast(tf.shape(x)[0], 'float64') - colsize = tf.cast(tf.shape(x)[1], 'float64') + rowsize = tf.cast(tf.shape(x)[0], "float64") + colsize = tf.cast(tf.shape(x)[1], "float64") solve_row = row_cov.solve(x) logdet_row = row_cov.logdet - solve_col, logdet_col = solve_det_conditional(tf.transpose(x), - col_cov, - tf.transpose(cond), - cond_cov) + solve_col, logdet_col = solve_det_conditional( + tf.transpose(x), col_cov, tf.transpose(cond), cond_cov + ) - return _mnorm_logp_internal(colsize, rowsize, logdet_row, - logdet_col, solve_row, solve_col) + return _mnorm_logp_internal( + colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col + ) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index e5af214a3..58ef9da16 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -8,7 +8,7 @@ from tensorflow.contrib.opt import ScipyOptimizerInterface import logging -__all__ = ['MNRSA'] +__all__ = ["MNRSA"] class MNRSA(BaseEstimator): @@ -60,8 +60,8 @@ class MNRSA(BaseEstimator): """ - def __init__(self, time_cov, space_cov, n_nureg=5, - optimizer='L-BFGS-B', optCtrl=None): + def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", + optCtrl=None): self.n_T = time_cov.size self.n_V = space_cov.size @@ -73,8 +73,9 @@ def __init__(self, time_cov, space_cov, n_nureg=5, self.X = tf.placeholder(tf.float64, [self.n_T, None], name="Design") self.Y = tf.placeholder(tf.float64, [self.n_T, self.n_V], name="Brain") - self.X_0 = tf.Variable(tf.random_normal([self.n_T, n_nureg], - dtype=tf.float64), name="X_0") + self.X_0 = tf.Variable( + tf.random_normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0" + ) self.train_variables = [self.X_0] @@ -124,33 +125,36 @@ def fit(self, X, y, structured_RSA_cov=None): self.L_full = tf.Variable(naiveRSA_L, name="L_full", dtype="float64") L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) - self.L = tf.matrix_set_diag(L_indeterminate, - tf.exp(tf.matrix_diag_part( - L_indeterminate))) + self.L = tf.matrix_set_diag( + L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) + ) self.train_variables.extend([self.L_full]) self.x_stack = tf.concat([tf.matmul(self.X, self.L), self.X_0], 1) self.sess.run(tf.global_variables_initializer(), feed_dict=feed_dict) - optimizer = ScipyOptimizerInterface(-self.logp(), - var_list=self.train_variables, - method=self.optMethod, - options=self.optCtrl) + optimizer = ScipyOptimizerInterface( + -self.logp(), + var_list=self.train_variables, + method=self.optMethod, + options=self.optCtrl, + ) if logging.getLogger().isEnabledFor(logging.INFO): optimizer._packed_loss_grad = tf.Print( - optimizer._packed_loss_grad, - [tf.reduce_min( - optimizer._packed_loss_grad)], - 'mingrad') + optimizer._packed_loss_grad, + [tf.reduce_min(optimizer._packed_loss_grad)], + "mingrad", + ) optimizer._packed_loss_grad = tf.Print( - optimizer._packed_loss_grad, - [tf.reduce_max( - optimizer._packed_loss_grad)], - 'maxgrad') - optimizer._packed_loss_grad = tf.Print(optimizer._packed_loss_grad, - [self.logp()], 'logp') + optimizer._packed_loss_grad, + [tf.reduce_max(optimizer._packed_loss_grad)], + "maxgrad", + ) + optimizer._packed_loss_grad = tf.Print( + optimizer._packed_loss_grad, [self.logp()], "logp" + ) optimizer.minimize(session=self.sess, feed_dict=feed_dict) @@ -164,9 +168,15 @@ def logp(self): rsa_cov = CovIdentity(size=self.n_c + self.n_nureg) - return self.time_cov.logp + \ - self.space_cov.logp + \ - rsa_cov.logp + \ - matnorm_logp_marginal_row(self.Y, 
row_cov=self.time_cov, - col_cov=self.space_cov, - marg=self.x_stack, marg_cov=rsa_cov) + return ( + self.time_cov.logp + + self.space_cov.logp + + rsa_cov.logp + + matnorm_logp_marginal_row( + self.Y, + row_cov=self.time_cov, + col_cov=self.space_cov, + marg=self.x_stack, + marg_cov=rsa_cov, + ) + ) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index d7fb769fd..f6a670636 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -4,7 +4,7 @@ from brainiak.matnormal.matnormal_likelihoods import matnorm_logp from tensorflow.contrib.opt import ScipyOptimizerInterface -__all__ = ['MatnormRegression'] +__all__ = ["MatnormRegression"] class MatnormRegression(BaseEstimator): @@ -24,8 +24,9 @@ class MatnormRegression(BaseEstimator): Step size for the Adam optimizer """ - def __init__(self, time_cov, space_cov, - optimizer='L-BFGS-B', optCtrl=None): + + def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", + optCtrl=None): self.optCtrl, self.optMethod = optCtrl, optimizer self.time_cov = time_cov @@ -87,10 +88,12 @@ def fit(self, X, y): # initialize to the least squares solution (basically all # we need now is the cov) - sigma_inv_x = self.time_cov.solve(self.X)\ - .eval(session=self.sess, feed_dict=feed_dict) - sigma_inv_y = self.time_cov.solve(self.Y)\ - .eval(session=self.sess, feed_dict=feed_dict) + sigma_inv_x = self.time_cov.solve(self.X).eval( + session=self.sess, feed_dict=feed_dict + ) + sigma_inv_y = self.time_cov.solve(self.Y).eval( + session=self.sess, feed_dict=feed_dict + ) beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) @@ -103,10 +106,12 @@ def fit(self, X, y): self.sess.run(tf.variables_initializer([self.beta])) - optimizer = ScipyOptimizerInterface(-self.logp(), - var_list=self.train_variables, - method=self.optMethod, - options=self.optCtrl) + optimizer = ScipyOptimizerInterface( + -self.logp(), + var_list=self.train_variables, + method=self.optMethod, + options=self.optCtrl, + ) optimizer.minimize(session=self.sess, feed_dict=feed_dict) @@ -129,7 +134,7 @@ def calibrate(self, Y): trained mapping. This method just does naive MLE: .. math:: - X = Y \Sigma_s^{-1}B'(B \Sigma_s^{-1} B')^{-1} + X = Y \\Sigma_s^{-1}B'(B \\Sigma_s^{-1} B')^{-1} Parameters ---------- @@ -137,18 +142,19 @@ def calibrate(self, Y): fMRI dataset """ - if (Y.shape[1] <= self.n_c): - raise RuntimeError("More conditions than voxels! System is singular,\ - cannot decode.") + if Y.shape[1] <= self.n_c: + raise RuntimeError( + "More conditions than voxels! System is singular,\ + cannot decode." 
+ ) # Sigma_s^{-1} B' - Sigma_s_btrp = self.space_cov.solve(tf.transpose( - self.beta)) + Sigma_s_btrp = self.space_cov.solve(tf.transpose(self.beta)) # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) # (B Sigma_s^{-1} B')^{-1} - B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp)\ - .eval(session=self.sess) + B_Sigma_Btrp = tf.matmul( + self.beta, Sigma_s_btrp).eval(session=self.sess) X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index eea64330a..62c486159 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -6,7 +6,8 @@ def rmn(rowcov, colcov): # generate random draws from a zero-mean matrix-normal distribution Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))) - return(cholesky(rowcov).dot(Z).dot(cholesky(colcov))) + return cholesky(rowcov).dot(Z).dot(cholesky(colcov)) + def xx_t(x): """ x * x' """ diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py index fc67b1764..f65e92b9f 100644 --- a/brainiak/utils/kronecker_solvers.py +++ b/brainiak/utils/kronecker_solvers.py @@ -5,6 +5,7 @@ "tf_masked_triangular_solve", ] + def tf_solve_lower_triangular_kron(L, y): """ Tensor flow function to solve L x = y where L = kron(L[0], L[1] .. L[n-1]) @@ -132,7 +133,9 @@ def tf_kron_mult(L, x): for i in range(na): ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), - tf.transpose(tf.slice(L[0], [i, 0], [1, na]))), + tf.transpose(tf.slice(L[0], + [i, 0], + [1, na]))), [nb, col]) y = tf.concat(axis=0, values=[ya, yb, yc]) return y @@ -169,7 +172,8 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): zero = tf.constant(0, dtype=tf.int32) mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), - tf.reshape(mask, [1, -1])), zero)) + tf.reshape(mask, [1, -1])), + zero)) q = tf.to_int32(tf.sqrt(tf.to_double(tf.shape(mask_mat)[0]))) L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) diff --git a/brainiak/utils/utils.py b/brainiak/utils/utils.py index e9a92b8cd..110be8b96 100644 --- a/brainiak/utils/utils.py +++ b/brainiak/utils/utils.py @@ -18,7 +18,6 @@ import psutil from .fmrisim import generate_stimfunction, _double_gamma_hrf, convolve_hrf from scipy.fftpack import fft, ifft -import math import logging diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 724a353f3..4234e6f03 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -1,15 +1,17 @@ import numpy as np from numpy.testing import assert_allclose from scipy.stats import norm, wishart, invgamma, invwishart -from brainiak.matnormal.covs import (CovIdentity, - CovAR1, - CovIsotropic, - CovDiagonal, - CovDiagonalGammaPrior, - CovUnconstrainedCholesky, - CovUnconstrainedCholeskyWishartReg, - CovUnconstrainedInvCholesky, - CovKroneckerFactored) +from brainiak.matnormal.covs import ( + CovIdentity, + CovAR1, + CovIsotropic, + CovDiagonal, + CovDiagonalGammaPrior, + CovUnconstrainedCholesky, + CovUnconstrainedCholeskyWishartReg, + CovUnconstrainedInvCholesky, + CovKroneckerFactored, +) import tensorflow as tf import pytest import logging @@ -63,7 +65,7 @@ def logdet_sinv_np_mask(X, sigma, mask): def test_CovConstant(): - cov_np = wishart.rvs(df=m+2, scale=np.eye(m)) + cov_np = wishart.rvs(df=m + 2, scale=np.eye(m)) cov = CovUnconstrainedCholesky(Sigma=cov_np) with tf.Session() as sess: @@ -76,10 +78,10 @@ def test_CovConstant(): # 
compute the naive version logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) + assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovIdentity(): @@ -94,8 +96,8 @@ def test_CovIdentity(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovIsotropic(): @@ -110,8 +112,8 @@ def test_CovIsotropic(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovDiagonal(): @@ -122,12 +124,12 @@ def test_CovDiagonal(): # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = np.diag(1/cov.prec.eval(session=sess)) + cov_np = np.diag(1 / cov.prec.eval(session=sess)) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovDiagonal_initialized(): @@ -142,15 +144,15 @@ def test_CovDiagonal_initialized(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovDiagonalGammaPrior(): cov_np = np.diag(np.exp(np.random.normal(size=m))) - cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, - beta=1e-10) + cov = CovDiagonalGammaPrior( + size=m, sigma=np.diag(cov_np), alpha=1.5, beta=1e-10) ig = invgamma(1.5, scale=1e-10) @@ -159,11 +161,11 @@ def test_CovDiagonalGammaPrior(): sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - penalty_np = np.sum(ig.logpdf(1/np.diag(cov_np))) + penalty_np = np.sum(ig.logpdf(1 / np.diag(cov_np))) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) assert_allclose(penalty_np, cov.logp.eval(session=sess), rtol=rtol) @@ -180,8 +182,8 @@ def test_CovUnconstrainedCholesky(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, 
cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovUnconstrainedCholeskyWishartReg(): @@ -199,16 +201,16 @@ def test_CovUnconstrainedCholeskyWishartReg(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) # now compute the regularizer - reg = wishart.logpdf(cov_np, df=m+2, scale=1e10 * np.eye(m)) + reg = wishart.logpdf(cov_np, df=m + 2, scale=1e10 * np.eye(m)) assert_allclose(reg, cov.logp.eval(session=sess), rtol=rtol) def test_CovUnconstrainedInvCholesky(): - init = invwishart.rvs(scale=np.eye(m), df=m+2) + init = invwishart.rvs(scale=np.eye(m), df=m + 2) cov = CovUnconstrainedInvCholesky(size=m, invSigma=init) with tf.Session() as sess: @@ -218,17 +220,17 @@ def test_CovUnconstrainedInvCholesky(): Linv = cov.Linv.eval(session=sess) L = np.linalg.inv(Linv) cov_np = L @ L.T - + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_Cov2FactorKron(): - assert(m % 2 == 0) - dim1 = int(m/2) + assert m % 2 == 0 + dim1 = int(m / 2) dim2 = 2 with pytest.raises(TypeError) as excinfo: @@ -249,13 +251,13 @@ def test_Cov2FactorKron(): assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_Cov3FactorKron(): - assert(m % 4 == 0) - dim1 = int(m/4) + assert m % 4 == 0 + dim1 = int(m / 4) dim2 = 2 dim3 = 2 cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3]) @@ -267,20 +269,21 @@ def test_Cov3FactorKron(): L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) L3 = (cov.L[2]).eval(session=sess) - cov_np = np.kron(np.kron(np.dot(L1, L1.transpose()), - np.dot(L2, L2.transpose())), - np.dot(L3, L3.transpose())) + cov_np = np.kron( + np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())), + np.dot(L3, L3.transpose()), + ) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_Cov3FactorMaskedKron(): - assert(m % 4 == 0) - dim1 = int(m/4) + assert m % 4 == 0 + dim1 = int(m / 4) dim2 = 2 dim3 = 2 @@ -299,18 +302,27 @@ def test_Cov3FactorMaskedKron(): L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) L3 = (cov.L[2]).eval(session=sess) - cov_np_factor = np.kron(L1, np.kron(L2, L3))[np.ix_(mask_indices, - mask_indices)] + cov_np_factor = np.kron(L1, np.kron(L2, L3))[ + np.ix_(mask_indices, mask_indices)] cov_np = np.dot(cov_np_factor, cov_np_factor.transpose()) - 
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices, :], - cov_np) - - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol, - atol=atol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess)[ - np.ix_(mask_indices, mask_indices)], rtol=rtol, atol=atol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess)[ - mask_indices, :], rtol=rtol, atol=atol) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np( + X[mask_indices, :], cov_np) + + assert_allclose(logdet_np, cov.logdet.eval( + session=sess), rtol=rtol, atol=atol) + assert_allclose( + sinv_np, + cov.solve(eye).eval(session=sess)[ + np.ix_(mask_indices, mask_indices)], + rtol=rtol, + atol=atol, + ) + assert_allclose( + sinvx_np, + cov.solve(X_tf).eval(session=sess)[mask_indices, :], + rtol=rtol, + atol=atol, + ) def test_CovAR1(): @@ -325,13 +337,13 @@ def test_CovAR1(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) def test_CovAR1_scan_onsets(): - cov = CovAR1(size=m, scan_onsets=[0, m//2]) + cov = CovAR1(size=m, scan_onsets=[0, m // 2]) with tf.Session() as sess: # initialize the random covariance @@ -341,5 +353,5 @@ def test_CovAR1_scan_onsets(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), - rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf).eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index dbf07ff3c..23118e576 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -17,6 +17,7 @@ rtol = 1e-7 + def test_against_scipy_mvn_row(): with tf.Session() as sess: @@ -24,15 +25,14 @@ def test_against_scipy_mvn_row(): rowcov = CovUnconstrainedCholesky(size=m) colcov = CovIdentity(size=n) X = rmn(np.eye(m), np.eye(n)) - X_tf = tf.constant(X, 'float64') + X_tf = tf.constant(X, "float64") sess.run(tf.global_variables_initializer()) - rowcov_np = rowcov._cov.eval(session=sess) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), - rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp(X_tf, rowcov, colcov) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) @@ -44,13 +44,14 @@ def test_against_scipy_mvn_col(): rowcov = CovIdentity(size=m) colcov = CovUnconstrainedCholesky(size=n) X = rmn(np.eye(m), np.eye(n)) - X_tf = tf.constant(X, 'float64') + X_tf = tf.constant(X, "float64") sess.run(tf.global_variables_initializer()) colcov_np = colcov._cov.eval(session=sess) - scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), - colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X, + np.zeros([n]), + colcov_np)) tf_answer = matnorm_logp(X_tf, rowcov, colcov) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 1ab7579ab..ad73d631d 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -4,8 +4,9 @@ import tensorflow as tf from brainiak.matnormal.utils import rmn from 
brainiak.matnormal.matnormal_likelihoods import ( - matnorm_logp_conditional_col, - matnorm_logp_conditional_row) + matnorm_logp_conditional_col, + matnorm_logp_conditional_row, +) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging @@ -24,7 +25,7 @@ def test_against_scipy_mvn_col_conditional(): # have to be careful for constructing everything as a submatrix of a big # PSD matrix, else no guarantee that anything's invertible. - cov_np = wishart.rvs(df=m+p+2, scale=np.eye(m+p)) + cov_np = wishart.rvs(df=m + p + 2, scale=np.eye(m + p)) # rowcov = CovConstant(cov_np[0:m, 0:m]) rowcov = CovUnconstrainedCholesky(Sigma=cov_np[0:m, 0:m]) @@ -36,8 +37,8 @@ def test_against_scipy_mvn_col_conditional(): X = rmn(np.eye(m), np.eye(n)) - A_tf = tf.constant(A, 'float64') - X_tf = tf.constant(X, 'float64') + A_tf = tf.constant(A, "float64") + X_tf = tf.constant(X, "float64") with tf.Session() as sess: @@ -45,11 +46,11 @@ def test_against_scipy_mvn_col_conditional(): Q_np = Q._cov.eval(session=sess) - rowcov_np = rowcov._cov.eval(session=sess) - \ + rowcov_np = rowcov._cov.eval(session=sess) -\ A.dot(np.linalg.inv(Q_np)).dot((A.T)) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), - rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) @@ -59,7 +60,7 @@ def test_against_scipy_mvn_row_conditional(): # have to be careful for constructing everything as a submatrix of a big # PSD matrix, else no guarantee that anything's invertible. - cov_np = wishart.rvs(df=m+p+2, scale=np.eye(m+p)) + cov_np = wishart.rvs(df=m + p + 2, scale=np.eye(m + p)) rowcov = CovIdentity(size=m) colcov = CovUnconstrainedCholesky(Sigma=cov_np[0:n, 0:n]) @@ -69,8 +70,8 @@ def test_against_scipy_mvn_row_conditional(): X = rmn(np.eye(m), np.eye(n)) - A_tf = tf.constant(A, 'float64') - X_tf = tf.constant(X, 'float64') + A_tf = tf.constant(A, "float64") + X_tf = tf.constant(X, "float64") with tf.Session() as sess: @@ -78,11 +79,11 @@ def test_against_scipy_mvn_row_conditional(): Q_np = Q._cov.eval(session=sess) - colcov_np = colcov._cov.eval(session=sess) - \ + colcov_np = colcov._cov.eval(session=sess) -\ A.T.dot(np.linalg.inv(Q_np)).dot((A)) - scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), - colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q) diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index ba1b1bc86..bbb219671 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -4,8 +4,9 @@ import tensorflow as tf from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import ( - matnorm_logp_marginal_col, - matnorm_logp_marginal_row) + matnorm_logp_marginal_col, + matnorm_logp_marginal_row, +) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky import logging @@ -30,8 +31,8 @@ def test_against_scipy_mvn_row_marginal(): X = rmn(np.eye(m), np.eye(n)) A = rmn(np.eye(m), np.eye(p)) - A_tf = tf.constant(A, 'float64') - X_tf = tf.constant(X, 'float64') + A_tf = tf.constant(A, "float64") + X_tf = tf.constant(X, "float64") with tf.Session() as sess: @@ -41,11 +42,11 @@ def 
test_against_scipy_mvn_row_marginal(): rowcov_np = rowcov._cov.eval(session=sess) + A.dot(Q_np).dot(A.T) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), - rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, + np.zeros([m]), + rowcov_np)) - tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, - A_tf, Q) + tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) @@ -58,8 +59,8 @@ def test_against_scipy_mvn_col_marginal(): X = rmn(np.eye(m), np.eye(n)) A = rmn(np.eye(p), np.eye(n)) - A_tf = tf.constant(A, 'float64') - X_tf = tf.constant(X, 'float64') + A_tf = tf.constant(A, "float64") + X_tf = tf.constant(X, "float64") with tf.Session() as sess: @@ -69,9 +70,8 @@ def test_against_scipy_mvn_col_marginal(): colcov_np = colcov._cov.eval(session=sess) + A.T.dot(Q_np).dot(A) - scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), - colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) - tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, - A_tf, Q) + tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 5ebdceb9e..8aef52a61 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -1,9 +1,11 @@ import numpy as np from scipy.stats import norm, wishart, pearsonr -from brainiak.matnormal.covs import (CovIdentity, - CovUnconstrainedCholesky, - CovUnconstrainedInvCholesky, - CovDiagonal) +from brainiak.matnormal.covs import ( + CovIdentity, + CovUnconstrainedCholesky, + CovUnconstrainedInvCholesky, + CovDiagonal, +) from brainiak.matnormal.regression import MatnormRegression from brainiak.matnormal.utils import rmn import logging @@ -25,7 +27,7 @@ def test_matnorm_regression_unconstrained(): B = norm.rvs(size=(n, p)) Y_hat = X.dot(B) rowcov_true = np.eye(m) - colcov_true = wishart.rvs(p+2, np.eye(p)) + colcov_true = wishart.rvs(p + 2, np.eye(p)) Y = Y_hat + rmn(rowcov_true, colcov_true) @@ -36,7 +38,7 @@ def test_matnorm_regression_unconstrained(): model.fit(X, Y) - assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol def test_matnorm_regression_unconstrainedprec(): @@ -47,7 +49,7 @@ def test_matnorm_regression_unconstrainedprec(): B = norm.rvs(size=(n, p)) Y_hat = X.dot(B) rowcov_true = np.eye(m) - colcov_true = wishart.rvs(p+2, np.eye(p)) + colcov_true = wishart.rvs(p + 2, np.eye(p)) Y = Y_hat + rmn(rowcov_true, colcov_true) @@ -58,7 +60,7 @@ def test_matnorm_regression_unconstrainedprec(): model.fit(X, Y) - assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol def test_matnorm_regression_optimizerChoice(): @@ -69,7 +71,7 @@ def test_matnorm_regression_optimizerChoice(): B = norm.rvs(size=(n, p)) Y_hat = X.dot(B) rowcov_true = np.eye(m) - colcov_true = wishart.rvs(p+2, np.eye(p)) + colcov_true = wishart.rvs(p + 2, np.eye(p)) Y = Y_hat + rmn(rowcov_true, colcov_true) @@ -81,7 +83,7 @@ def test_matnorm_regression_optimizerChoice(): model.fit(X, Y) - assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol def 
test_matnorm_regression_scaledDiag(): @@ -104,4 +106,4 @@ def test_matnorm_regression_scaledDiag(): model.fit(X, Y) - assert(pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol) + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index f69dc4845..6dd0bd18f 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -24,22 +24,14 @@ def gen_U_nips2016_example(): def gen_brsa_data_matnorm_model(U, n_T, n_V, space_cov, time_cov, n_nureg): n_C = U.shape[0] - beta = rmn(U, space_cov) - X = rmn(np.eye(n_T), np.eye(n_C)) - beta_0 = rmn(np.eye(n_nureg), space_cov) - X_0 = rmn(np.eye(n_T), np.eye(n_nureg)) - Y_hat = X.dot(beta) + X_0.dot(beta_0) - Y = Y_hat + rmn(time_cov, space_cov) - sizes = {"n_C": n_C, "n_T": n_T, "n_V": n_V} - - train = {"beta": beta, "X": X, "Y": Y, "U": U, 'X_0': X_0} + train = {"beta": beta, "X": X, "Y": Y, "U": U, "X_0": X_0} return train, sizes @@ -58,18 +50,22 @@ def test_brsa_rudimentary(): timecov_true = np.diag(np.abs(norm.rvs(size=(n_T)))) - tr, sz = gen_brsa_data_matnorm_model(U, n_T=n_T, n_V=n_V, n_nureg=n_nureg, - space_cov=spacecov_true, - time_cov=timecov_true) + tr, sz = gen_brsa_data_matnorm_model( + U, + n_T=n_T, + n_V=n_V, + n_nureg=n_nureg, + space_cov=spacecov_true, + time_cov=timecov_true, + ) spacecov_model = CovIdentity(size=n_V) timecov_model = CovDiagonal(size=n_T) - model_matnorm = MNRSA(time_cov=timecov_model, - space_cov=spacecov_model) + model_matnorm = MNRSA(time_cov=timecov_model, space_cov=spacecov_model) - model_matnorm.fit(tr['Y'], tr['X']) + model_matnorm.fit(tr["Y"], tr["X"]) - RMSE = np.mean((model_matnorm.C_ - cov2corr(tr['U']))**2)**0.5 + RMSE = np.mean((model_matnorm.C_ - cov2corr(tr["U"])) ** 2) ** 0.5 - assert(RMSE < 0.1) + assert RMSE < 0.1 From 231391bf5bd56103aaa2ece87d37148f52d949e3 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Tue, 7 Jan 2020 20:23:35 -0800 Subject: [PATCH 18/84] add metaclass reference --- brainiak/matnormal/covs.py | 154 ++++++++++++++++++++++++++++--------- 1 file changed, 116 insertions(+), 38 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 5e1f97f4f..29bffe069 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -26,11 +26,18 @@ ] -class CovBase(object): - """Base metaclass for noise covariances - """ +class CovBase(abc.ABC): + """Base metaclass for residual covariances. + For more on abstract classes, see + https://docs.python.org/3/library/abc.html + + Parameters + ---------- + + size: int + The size of the covariance matrix. 
- __metaclass__ = abc.ABCMeta + """ def __init__(self, size): self.size = size @@ -45,8 +52,9 @@ def get_optimize_vars(self): """ pass + @property def logdet(self): - """ log|Sigma| + """ log determinant of this covariance """ pass @@ -59,14 +67,14 @@ def solve(self, X): @property def _prec(self): """Expose the precision explicitly (mostly for testing / - visualization) + visualization, materializing large covariances may be intractable) """ return self.solve(tf.eye(self.size, dtype=tf.float64)) @property def _cov(self): """Expose the covariance explicitly (mostly for testing / - visualization) + visualization, materializing large covariances may be intractable) """ return tf.linalg.inv(self._prec) @@ -77,10 +85,13 @@ class CovIdentity(CovBase): def __init__(self, size): super(CovIdentity, self).__init__(size) - self.logdet = tf.constant(0.0, "float64") + + @property + def logdet(self): + return tf.constant(0.0, "float64") def get_optimize_vars(self): - """ Returns a list of tf variables that need to get optimized to + """Returns a list of tf variables that need to get optimized to fit this covariance """ return [] @@ -92,7 +103,19 @@ def solve(self, X): class CovAR1(CovBase): - """AR1 covariance + """AR(1) covariance parameterized by autoregressive parameter rho + and new noise sigma. + + Parameters + ---------- + size: int + size of covariance matrix + rho: float or None + initial value of autoregressive parameter (if None, initialize + randomly) + sigma: float or None + initial value of new noise parameter (if None, initialize randomly) + """ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): @@ -139,28 +162,33 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): else: self.rho_unc = tf.Variable(np.log(rho), name="rho") - # make logdet, first unconstrain rho and sigma + @property + def logdet(self): + """ log-determinant of this covariance + """ + # first, unconstrain rho and sigma rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) # now compute logdet - self.logdet = tf.reduce_sum( + return tf.reduce_sum( 2 * tf.constant(self.run_sizes, dtype=tf.float64) * tf.log(sigma) - tf.log(1 - tf.square(rho)) ) - # precompute sigma_inv op - # Unlike BRSA we assume stationarity within block so no special case - # for first/last element of a block. This makes constructing this - # matrix easier. - # reprsimil.BRSA says (I - rho1 * D + rho1**2 * F) / sigma**2 - + @property + def _prec(self): + """Precision matrix corresponding to this AR(1) covariance. + Unlike BRSA we assume stationarity within block so no special case + for first/last element of a block. This makes constructing this + matrix easier. + reprsimil.BRSA says (I - rho1 * D + rho1**2 * F) / sigma**2 and we + use the same trick + """ rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) - self.Sigma_inv = ( - self._identity_mat - - rho * self.offdiag_template - + rho ** 2 * self.diag_template - ) / tf.square(sigma) + + return (self._identity_mat - rho * self.offdiag_template + + rho ** 2 * self.diag_template) / tf.square(sigma) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to @@ -171,11 +199,19 @@ def get_optimize_vars(self): def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ - return tf.matmul(self.Sigma_inv, X) + return tf.matmul(self._prec, X) class CovIsotropic(CovBase): """Scaled identity (isotropic) noise covariance. 
+ + Parameters + ---------- + size: int + size of covariance matrix + sigma: float or None + initial value of new noise parameter (if None, initialize randomly) + """ def __init__(self, size, sigma=None): @@ -186,11 +222,12 @@ def __init__(self, size, sigma=None): ) else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") - - self.logdet = self.size * self.log_sigma - self.sigma = tf.exp(self.log_sigma) + @property + def logdet(self): + return self.size * self.log_sigma + def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance @@ -199,6 +236,12 @@ def get_optimize_vars(self): def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + + Parameters + ---------- + X: tf.Tensor + Tensor to multiply by inverse of this covariance + """ return X / self.sigma @@ -217,10 +260,13 @@ def __init__(self, size, sigma=None): self.logprec = tf.Variable( np.log(1 / sigma), name="log-precisions") - self.logdet = -tf.reduce_sum(self.logprec) self.prec = tf.exp(self.logprec) self.prec_dimaugmented = tf.expand_dims(self.prec, -1) + @property + def logdet(self): + return -tf.reduce_sum(self.logprec) + def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance @@ -229,6 +275,12 @@ def get_optimize_vars(self): def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + + Parameters + ---------- + X: tf.Tensor + Tensor to multiply by inverse of this covariance + """ return tf.multiply(self.prec_dimaugmented, X) @@ -293,7 +345,9 @@ def __init__(self, size=None, Sigma=None): L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) ) - self.logdet = 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) + @property + def logdet(self): + return 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -302,9 +356,13 @@ def get_optimize_vars(self): return [self.L_full] def solve(self, X): - """ - Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using - cholesky solve + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + using cholesky solve + Parameters + ---------- + X: tf.Tensor + Tensor to multiply by inverse of this covariance + """ return tf.cholesky_solve(self.L, X) @@ -314,6 +372,13 @@ class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): cholesky factor. Regularized using the trick from Chung et al. 2015 such that as the covariance approaches singularity, the likelihood goes to 0. + + References + ---------- + Chung, Y., Gelman, A., Rabe-Hesketh, S., Liu, J., & Dorie, V. (2015). + Weakly Informative Prior for Point Estimation of Covariance Matrices + in Hierarchical Models. Journal of Educational and Behavioral Statistics, + 40(2), 136–157. https://doi.org/10.3102/1076998615570945 """ def __init__(self, size, Sigma=None): @@ -331,7 +396,7 @@ class CovUnconstrainedInvCholesky(CovBase): """Unconstrained noise covariance parameterized in terms of its precision cholesky. Use this over the regular cholesky unless you have a good reason not to, since - you save a solve on every step. 
+ this saves a cholesky solve on every step of optimization """ def __init__(self, size, invSigma=None): @@ -356,8 +421,10 @@ def __init__(self, size, invSigma=None): self.Linv = tf.matrix_set_diag( L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) ) - self.logdet = -2 * \ - tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) + + @property + def logdet(self): + return -2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -369,6 +436,12 @@ def solve(self, X): """ Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using matmul (since we're parameterized by L_inv) + + Parameters + ---------- + X: tf.Tensor + Tensor to multiply by inverse of this covariance + """ return tf.matmul(x_tx(self.Linv), X) @@ -434,15 +507,14 @@ def __init__(self, sizes, Sigmas=None, mask=None): for mat in L_indeterminate ] - self.logdet = self._make_logdet() - def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance """ return self.L_full - def _make_logdet(self): + @property + def logdet(self): """ log|Sigma| using the diagonals of the cholesky factors. """ if self.mask is None: @@ -470,6 +542,12 @@ def solve(self, X): """ Given this Sigma and some X, compute Sigma^{-1} * x using traingular solves with the cholesky factors. Do 2 triangular solves - L L^T x = y as L z = y and L^T x = z + + Parameters + ---------- + X: tf.Tensor + Tensor to multiply by inverse of this covariance + """ if self.mask is None: z = tf_solve_lower_triangular_kron(self.L, X) From d2bb28510cce8f07da0a668ba42db3aacbd01337 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Tue, 7 Jan 2020 21:49:56 -0800 Subject: [PATCH 19/84] further cleanup post refactor, doc changes, addressing minor comments --- brainiak/matnormal/__init__.py | 4 +- brainiak/matnormal/covs.py | 62 +++++--- brainiak/matnormal/matnormal_likelihoods.py | 136 ++++++++++++++---- brainiak/matnormal/mnrsa.py | 29 ++-- brainiak/matnormal/regression.py | 27 +--- brainiak/matnormal/utils.py | 2 +- brainiak/utils/kronecker_solvers.py | 17 +-- tests/matnormal/test_cov.py | 4 +- .../test_matnormal_logp_conditional.py | 4 +- 9 files changed, 185 insertions(+), 100 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index 41b2ff24b..d9fc27056 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -58,8 +58,8 @@ The `brainiak.matnormal` package provides structure to infer models that can be stated in the matrix-normal notation that are useful for fMRI analysis. -It provides a few interfaces. `MatnormModelBase` is intended to be subclasses -from by matrix-variate models. It provides a wrapper for the tensorflow +It provides a few interfaces. `MatnormModelBase` is intended is intended as a +base class for matrix-variate models. It provides a wrapper for the tensorflow optimizer that provides convergence checks based on thresholds on the function value and gradient, and simple verbose outputs. It also provides an interface for noise covariances (`CovBase`). Any class that follows the interface diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 29bffe069..c0364320a 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -28,9 +28,9 @@ class CovBase(abc.ABC): """Base metaclass for residual covariances. 
- For more on abstract classes, see + For more on abstract classes, see https://docs.python.org/3/library/abc.html - + Parameters ---------- @@ -101,6 +101,20 @@ def solve(self, X): """ return X + @property + def _prec(self): + """Expose the precision explicitly (mostly for testing / + visualization, materializing large covariances may be intractable) + """ + return tf.eye(self.size, dtype=tf.float64) + + @property + def _cov(self): + """Expose the covariance explicitly (mostly for testing / + visualization, materializing large covariances may be intractable) + """ + return tf.eye(self.size, dtype=tf.float64) + class CovAR1(CovBase): """AR(1) covariance parameterized by autoregressive parameter rho @@ -160,7 +174,8 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): tf.random_normal([1], dtype=tf.float64), name="rho" ) else: - self.rho_unc = tf.Variable(np.log(rho), name="rho") + self.rho_unc = tf.Variable( + 2 * tf.sigmoid(self.rho_unc) - 1, name="rho") @property def logdet(self): @@ -178,7 +193,7 @@ def logdet(self): @property def _prec(self): """Precision matrix corresponding to this AR(1) covariance. - Unlike BRSA we assume stationarity within block so no special case + We assume stationarity within block so no special case for first/last element of a block. This makes constructing this matrix easier. reprsimil.BRSA says (I - rho1 * D + rho1**2 * F) / sigma**2 and we @@ -209,30 +224,30 @@ class CovIsotropic(CovBase): ---------- size: int size of covariance matrix - sigma: float or None - initial value of new noise parameter (if None, initialize randomly) + var: float or None + initial value of new variance parameter (if None, initialize randomly) """ - def __init__(self, size, sigma=None): + def __init__(self, size, var=None): super(CovIsotropic, self).__init__(size) - if sigma is None: - self.log_sigma = tf.Variable( + if var is None: + self.log_var = tf.Variable( tf.random_normal([1], dtype=tf.float64), name="sigma" ) else: - self.log_sigma = tf.Variable(np.log(sigma), name="sigma") - self.sigma = tf.exp(self.log_sigma) + self.log_var = tf.Variable(np.log(var), name="sigma") + self.var = tf.exp(self.log_var) @property def logdet(self): - return self.size * self.log_sigma + return self.size * self.log_var def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance """ - return [self.log_sigma] + return [self.log_var] def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` @@ -243,22 +258,31 @@ def solve(self, X): Tensor to multiply by inverse of this covariance """ - return X / self.sigma + return X / self.var class CovDiagonal(CovBase): """Uncorrelated (diagonal) noise covariance + + Parameters + ---------- + size: int + size of covariance matrix + diag_var: float or None + initial value of (diagonal) variance vector (if None, initialize + randomly) + """ - def __init__(self, size, sigma=None): + def __init__(self, size, diag_var=None): super(CovDiagonal, self).__init__(size) - if sigma is None: + if diag_var is None: self.logprec = tf.Variable( tf.random_normal([size], dtype=tf.float64), name="precisions" ) else: self.logprec = tf.Variable( - np.log(1 / sigma), name="log-precisions") + np.log(1 / diag_var), name="log-precisions") self.prec = tf.exp(self.logprec) self.prec_dimaugmented = tf.expand_dims(self.prec, -1) @@ -347,7 +371,9 @@ def __init__(self, size=None, Sigma=None): @property def logdet(self): - return 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.L))) + + # We save a 
log here by using the diag of L_full + return 2 * tf.reduce_sum((tf.matrix_diag_part(self.L_full))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 25cbf7e99..0022d0347 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -7,7 +7,17 @@ def _condition(X): """ - Condition number, used for diagnostics + Condition number (https://en.wikipedia.org/wiki/Condition_number) + used for diagnostics. + + NOTE: this formulation is only defined for symmetric positive definite + matrices (which covariances should be, and what we're using this for) + + Parameters + ---------- + X: tf.Tensor + Symmetric tensor to compute condition number of + """ s = tf.svd(X, compute_uv=False) return tf.reduce_max(s) / tf.reduce_min(s) @@ -18,13 +28,25 @@ def solve_det_marginal(x, sigma, A, Q): Use matrix inversion lemma for the solve: .. math:: (\\Sigma + AQA')^{-1} X =\\ - \\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} + - A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1} + (\\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} + + A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X Use matrix determinant lemma for determinant: .. math:: \\log|(\\Sigma + AQA')| = \\log|Q^{-1} + A' \\Sigma^{-1} A| + \\log|Q| + \\log|\\Sigma| + + Parameters + ---------- + x: tf.Tensor + Tensor to multiply the solve by + sigma: brainiak.matnormal.CovBase + Covariance object implementing solve and logdet + A: tf.Tensor + Factor multiplying the variable we marginalized out + Q: brainiak.matnormal.CovBase + Covariance object of marginalized variable, + implementing solve and logdet """ # For diagnostics, we want to check condition numbers @@ -77,13 +99,26 @@ def solve_det_conditional(x, sigma, A, Q): Use matrix inversion lemma for the solve: .. math:: (\\Sigma - AQ^{-1}A')^{-1} X =\\ - \\Sigma^{-1} + \\Sigma^{-1} A (Q - - A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1} X + (\\Sigma^{-1} + \\Sigma^{-1} A (Q - + A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X Use matrix determinant lemma for determinant: .. math:: \\log|(\\Sigma - AQ^{-1}A')| = \\log|Q - A' \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma| + + Parameters + ---------- + x: tf.Tensor + Tensor to multiply the solve by + sigma: brainiak.matnormal.CovBase + Covariance object implementing solve and logdet + A: tf.Tensor + Factor multiplying the variable we conditioned on + Q: brainiak.matnormal.CovBase + Covariance object of conditioning variable, + implementing solve and logdet + """ # (Q - A' Sigma^{-1} A) @@ -112,6 +147,22 @@ def _mnorm_logp_internal( colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col ): """Construct logp from the solves and determinants. + + Parameters + ---------------- + colsize: int + Column dimnesion of observation tensor + rowsize: int + Row dimension of observation tensor + logdet_row: tf.Tensor (scalar) + log-determinant of row covariance + logdet_col: tf.Tensor (scalar) + log-determinant of column covariance + solve_row: tf.Tensor + Inverse row covariance multiplying the observation tensor + solve_col + Inverse column covariance multiplying the transpose of + the observation tensor """ log2pi = 1.8378770664093453 @@ -130,6 +181,16 @@ def _mnorm_logp_internal( def matnorm_logp(x, row_cov, col_cov): """Log likelihood for centered matrix-variate normal density. Assumes that row_cov and col_cov follow the API defined in CovBase. 
+ + Parameters + ---------------- + x: tf.Tensor + Observation tensor + row_cov: CovBase + Row covariance implementing the CovBase API + col_cov: CovBase + Column Covariance implementing the CovBase API + """ rowsize = tf.cast(tf.shape(x)[0], "float64") @@ -150,15 +211,30 @@ def matnorm_logp(x, row_cov, col_cov): def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): """ - Log likelihood for centered matrix-variate normal density. - Assumes that row_cov, col_cov, and marg_cov follow the API defined - in CovBase. + Log likelihood for marginal centered matrix-variate normal density. - When you marginalize in mnorm, you end up with a covariance S + APA', - where P is the covariance of A in the relevant dimension. + Given: + .. math:: + X \\sim \\mathcal{MN}(0, Q, C)\\ + Y \\mid \\X \\sim \\mathcal{MN}(AX, R, C),\\ + Y \\sim \\mathcal{MN}(0, R + AQA, C) + + This function efficiently computes the marginals by unpacking some + info in the covariance classes and then dispatching to solve_det_marginal. + + Parameters + --------------- + x: tf.Tensor + Observation tensor + row_cov: CovBase + Row covariance implementing the CovBase API + col_cov: CovBase + Column Covariance implementing the CovBase API + marg: tf.Tensor + Marginal factor + marg_cov: CovBase + Prior covariance implementing the CovBase API - This method exploits the matrix inversion and determinant lemmas to - construct S + APA' given the covariance API in in CovBase. """ rowsize = tf.cast(tf.shape(x)[0], "float64") colsize = tf.cast(tf.shape(x)[1], "float64") @@ -175,14 +251,29 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): """ - Log likelihood for centered matrix-variate normal density. Assumes that - row_cov, col_cov, and marg_cov follow the API defined in CovBase. + Log likelihood for centered marginal matrix-variate normal density. - When you marginalize in mnorm, you end up with a covariance S + APA', - where P is the covariance of A in the relevant dimension. + .. math:: + X \\sim \\mathcal{MN}(0, R, Q)\\ + Y \\mid \\X \\sim \\mathcal{MN}(XA, R, C),\\ + Y \\sim \\mathcal{MN}(0, R, C + AQA) + + This function efficiently computes the marginals by unpacking some + info in the covariance classes and then dispatching to solve_det_marginal. + + Parameters + --------------- + x: tf.Tensor + Observation tensor + row_cov: CovBase + Row covariance implementing the CovBase API + col_cov: CovBase + Column Covariance implementing the CovBase API + marg: tf.Tensor + Marginal factor + marg_cov: CovBase + Prior covariance implementing the CovBase API - This method exploits the matrix inversion and determinant lemmas to - construct S + APA' given the covariance API in in CovBase. """ rowsize = tf.cast(tf.shape(x)[0], "float64") colsize = tf.cast(tf.shape(x)[1], "float64") @@ -201,15 +292,6 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): """ - Log likelihood for centered matrix-variate normal density. Assumes that - row_cov, col_cov, and cond_cov follow the API defined in CovBase. - - When you go from joint to conditional in mnorm, you end up with a - covariance S - APA', where P is the covariance of A in the relevant - dimension. - - This method exploits the matrix inversion and determinant lemmas to - construct S - APA' given the covariance API in in CovBase. 
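The deletion above leaves this docstring empty. A possible body, mirroring the parameter descriptions used for the marginal variants elsewhere in this series (wording here is a suggestion, not taken from the patch), would be:

+    Log likelihood for conditional centered matrix-variate normal density,
+    where the conditioning variable enters along the row dimension.
+    Efficient solves and determinants are dispatched to
+    solve_det_conditional.
+
+    Parameters
+    ---------------
+    x: tf.Tensor
+        Observation tensor
+    row_cov: CovBase
+        Row covariance implementing the CovBase API
+    col_cov: CovBase
+        Column Covariance implementing the CovBase API
+    cond: tf.Tensor
+        Conditioning factor
+    cond_cov: CovBase
+        Covariance of the conditioning variable, implementing the CovBase API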
""" rowsize = tf.cast(tf.shape(x)[0], "float64") diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 58ef9da16..28ea0e616 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -15,26 +15,19 @@ class MNRSA(BaseEstimator): """ Matrix normal version of RSA. The goal of this analysis is to find the covariance of the mapping from - some design matrixX to the fMRI signal Y. It does so by marginalizing over + some design matrix X to the fMRI signal Y. It does so by marginalizing over the actual mapping (i.e. averaging over the uncertainty in it), which happens to correct a bias imposed by structure in the design matrix on the RSA estimate (see Cai et al., NIPS 2016). - This implementation makes different choices about two things relative to - `brainiak.reprsimil.BRSA`: - - 1. The noise covariance is assumed to be kronecker-separable. Informally, - this means that all voxels has the same temporal covariance, and all time - points have the same spatialcovariance. This is in contrast to BRSA, which - allows different temporal covariance for each voxel. On the other hand, - computational efficiencies enabled by this choice allow MNRSA to - support a richer class of space and time covariances (anything in - `brainiak.matnormal.covs`). - - 2. MNRSA does not estimate the nuisance timecourse X_0. Instead, - we expect the temporal noise covariance to capture the same property - (because when marginalizing over B_0 gives a low-rank component - to the noise covariance, something we hope to have available soon. + This implementation makes different choices about residual covariance + relative to `brainiak.reprsimil.BRSA`: Here, the noise covariance is + assumed to be kronecker-separable. Informally, this means that all voxels + have the same temporal covariance, and all time points have the same + spatial covariance. This is in contrast to BRSA, which allows different + temporal covariance for each voxel. On the other hand, computational + efficiencies enabled by this choice allow MNRSA to support a richer class + of space and time covariances (anything in `brainiak.matnormal.covs`). For users: in general, if you are worried about voxels each having different temporal noise structure,you should use @@ -94,9 +87,9 @@ def fit(self, X, y, structured_RSA_cov=None): Parameters ---------- X: 2d array - Brain data matrix (voxels by TRs). Y in the math + Brain data matrix (TRs by voxels). Y in the math y: 2d array or vector - Behavior data matrix (behavioral obsevations by TRs). X in the math + Behavior data matrix (TRs by behavioral obsevations). X in the math max_iter: int, default=1000 Maximum number of iterations to run step: int, default=100 diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index f6a670636..cba1364c8 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -20,8 +20,11 @@ class MatnormRegression(BaseEstimator): TR noise covariance class following CovBase interface. space_cov : subclass of CovBase Voxel noise covariance class following CovBase interface. - learnRate : real, default=0.01 - Step size for the Adam optimizer + optimizer : string, default="L-BFGS-B" + Scipy optimizer to use. For other options, see "method" argument + in https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize + optCtrl: dict, default=None + Additional arguments to pass to scipy.optimize.minimize. 
""" @@ -59,26 +62,6 @@ def fit(self, X, y): Design matrix Y : np.array, TRs by voxels. fMRI data - voxel_pos: np.array, n_voxels by 3, default: None - Spatial positions of voxels (optional). - If provided, and if space_cov is a CovGP, the positions - for computing the GP covaraince matrix. Otherwise CovGP - defaults to distances of 1 unit between all voxels. - Ignored by non-GP noise covariances. - times : np.array, TRs by 1, default:None - Timestamps of observations (optional). - If provided, and if time_cov is a CovGP, the the times - for computing the GP covaraince matrix. Otherwise CovGP - defaults to distances of 1 unit between all times. - Ignored by non-GP noise covariances. - max_iter: int, default=1000 - Maximum number of iterations to run - step: int, default=100 - Number of steps between optimizer status outputs. - restart: bool, default=True - If this is true, optimizer is restarted (e.g. for a new dataset). - Otherwise optimizer will continue from where it is now (for example - for running more iterations if the initial number was not enough). """ self.n_c = X.shape[1] diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 62c486159..d7ab25aa5 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -5,7 +5,7 @@ def rmn(rowcov, colcov): # generate random draws from a zero-mean matrix-normal distribution - Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))) + Z = norm.rvs(size=(rowcov.shape[0], colcov.shape[0])) return cholesky(rowcov).dot(Z).dot(cholesky(colcov)) diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py index f65e92b9f..c3b4752cf 100644 --- a/brainiak/utils/kronecker_solvers.py +++ b/brainiak/utils/kronecker_solvers.py @@ -7,7 +7,7 @@ def tf_solve_lower_triangular_kron(L, y): - """ Tensor flow function to solve L x = y + """ Tensorflow function to solve L x = y where L = kron(L[0], L[1] .. L[n-1]) and L[i] are the lower triangular matrices @@ -53,7 +53,7 @@ def tf_solve_lower_triangular_kron(L, y): def tf_solve_upper_triangular_kron(L, y): - """ Tensor flow function to solve L^T x = y + """ Tensorflow function to solve L^T x = y where L = kron(L[0], L[1] .. L[n-1]) and L[i] are the lower triangular matrices @@ -142,7 +142,7 @@ def tf_kron_mult(L, x): def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): - """ Tensor flow function to solve L x = y + """ Tensorflow function to solve L x = y where L is a lower triangular matrix with a mask Arguments @@ -161,7 +161,7 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): True if L is lower triangular, False if upper triangular adjoint : boolean (default : False) - True if solving for L^x = y, False if solving for Lx = y + True if solving for L^T x = y, False if solving for Lx = y Returns ------- @@ -187,9 +187,10 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): def tf_solve_lower_triangular_masked_kron(L, y, mask): - """ Tensor flow function to solve L x = y - where L = kron(L[0], L[1] .. L[n-1]) - and L[i] are the lower triangular matrices + """ Tensorflow function to solve L x = y + where L = kron(L[0], L[1] .. 
L[n-1]), + L[i] are the lower triangular matrices, + and mask is a binary elementwise mask on the full L Arguments --------- @@ -251,7 +252,7 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): def tf_solve_upper_triangular_masked_kron(L, y, mask): - """ Tensor flow function to solve L^T x = y + """ Tensorflow function to solve L^T x = y where L = kron(L[0], L[1] .. L[n-1]) and L[i] are the lower triangular matrices diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 4234e6f03..22e216da5 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -108,7 +108,7 @@ def test_CovIsotropic(): # initialize the random covariance sess.run(tf.variables_initializer(cov.get_optimize_vars())) # compute the naive version - cov_np = cov.sigma.eval(session=sess) * np.eye(cov.size) + cov_np = cov._cov.eval(session=sess) * np.eye(cov.size) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) @@ -135,7 +135,7 @@ def test_CovDiagonal(): def test_CovDiagonal_initialized(): cov_np = np.diag(np.exp(np.random.normal(size=m))) - cov = CovDiagonal(size=m, sigma=np.diag(cov_np)) + cov = CovDiagonal(size=m, diag_var=np.diag(cov_np)) with tf.Session() as sess: # initialize the random covariance diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index ad73d631d..8b83e0828 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -21,7 +21,7 @@ rtol = 1e-7 -def test_against_scipy_mvn_col_conditional(): +def test_against_scipy_mvn_row_conditional(): # have to be careful for constructing everything as a submatrix of a big # PSD matrix, else no guarantee that anything's invertible. @@ -56,7 +56,7 @@ def test_against_scipy_mvn_col_conditional(): assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) -def test_against_scipy_mvn_row_conditional(): +def test_against_scipy_mvn_col_conditional(): # have to be careful for constructing everything as a submatrix of a big # PSD matrix, else no guarantee that anything's invertible. 
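The marginal and conditional helpers above rest on the Woodbury identity and the matrix determinant lemma, and the tests keep everything invertible by slicing blocks out of one large PSD matrix. A small numpy sketch checking both identities under that construction (sizes and names here are illustrative only, not the tests' own):

import numpy as np

rng = np.random.RandomState(0)
m, k = 6, 3
G = rng.normal(size=(m + k, m + k))
big = G @ G.T + (m + k) * np.eye(m + k)   # one big PSD matrix; slice blocks from it

sigma = big[:m, :m]            # plays the role of Sigma (row covariance)
Q = big[m:, m:]                # covariance of the marginalized variable
A = rng.normal(size=(m, k))    # loading matrix

marg = sigma + A @ Q @ A.T
sigma_inv = np.linalg.inv(sigma)

# Woodbury / matrix inversion lemma:
# (Sigma + A Q A')^{-1} = Sigma^{-1} - Sigma^{-1} A (Q^{-1} + A' Sigma^{-1} A)^{-1} A' Sigma^{-1}
lemma_factor = np.linalg.inv(np.linalg.inv(Q) + A.T @ sigma_inv @ A)
woodbury = sigma_inv - sigma_inv @ A @ lemma_factor @ A.T @ sigma_inv
assert np.allclose(woodbury, np.linalg.inv(marg))

# Matrix determinant lemma:
# log|Sigma + A Q A'| = log|Q^{-1} + A' Sigma^{-1} A| + log|Q| + log|Sigma|
lhs = np.linalg.slogdet(marg)[1]
rhs = (np.linalg.slogdet(np.linalg.inv(Q) + A.T @ sigma_inv @ A)[1]
       + np.linalg.slogdet(Q)[1]
       + np.linalg.slogdet(sigma)[1])
assert np.allclose(lhs, rhs)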
From 79092eef004df3a681037ec77c5971a26474b1e8 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sun, 1 Mar 2020 18:32:33 -0800 Subject: [PATCH 20/84] linter and deprecation fixes, rename Matnorm to Matnormal in one spot --- brainiak/matnormal/covs.py | 36 +++++++++++--------- brainiak/matnormal/matnormal_likelihoods.py | 27 ++++++++------- brainiak/matnormal/mnrsa.py | 30 +++++++--------- brainiak/matnormal/regression.py | 6 ++-- brainiak/matnormal/utils.py | 2 +- tests/matnormal/test_matnormal_regression.py | 10 +++--- 6 files changed, 55 insertions(+), 56 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index c0364320a..29601aa93 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -164,14 +164,14 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): if sigma is None: self.log_sigma = tf.Variable( - tf.random_normal([1], dtype=tf.float64), name="sigma" + tf.random.normal([1], dtype=tf.float64), name="sigma" ) else: self.log_sigma = tf.Variable(np.log(sigma), name="sigma") if rho is None: self.rho_unc = tf.Variable( - tf.random_normal([1], dtype=tf.float64), name="rho" + tf.random.normal([1], dtype=tf.float64), name="rho" ) else: self.rho_unc = tf.Variable( @@ -186,8 +186,9 @@ def logdet(self): sigma = tf.exp(self.log_sigma) # now compute logdet return tf.reduce_sum( - 2 * tf.constant(self.run_sizes, dtype=tf.float64) * tf.log(sigma) - - tf.log(1 - tf.square(rho)) + 2 * tf.constant(self.run_sizes, dtype=tf.float64) * + tf.math.log(sigma) + - tf.math.log(1 - tf.square(rho)) ) @property @@ -233,7 +234,7 @@ def __init__(self, size, var=None): super(CovIsotropic, self).__init__(size) if var is None: self.log_var = tf.Variable( - tf.random_normal([1], dtype=tf.float64), name="sigma" + tf.random.normal([1], dtype=tf.float64), name="sigma" ) else: self.log_var = tf.Variable(np.log(var), name="sigma") @@ -278,7 +279,7 @@ def __init__(self, size, diag_var=None): super(CovDiagonal, self).__init__(size) if diag_var is None: self.logprec = tf.Variable( - tf.random_normal([size], dtype=tf.float64), name="precisions" + tf.random.normal([size], dtype=tf.float64), name="precisions" ) else: self.logprec = tf.Variable( @@ -343,7 +344,7 @@ def __init__(self, size=None, Sigma=None): if Sigma is None: self.L_full = tf.Variable( - tf.random_normal([size, size], dtype=tf.float64), + tf.random.normal([size, size], dtype=tf.float64), name="L_full", dtype="float64", ) @@ -366,14 +367,14 @@ def __init__(self, size=None, Sigma=None): L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) self.L = tf.matrix_set_diag( - L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) + L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) ) @property def logdet(self): # We save a log here by using the diag of L_full - return 2 * tf.reduce_sum((tf.matrix_diag_part(self.L_full))) + return 2 * tf.reduce_sum((tf.linalg.diag_part(self.L_full))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -390,7 +391,7 @@ def solve(self, X): Tensor to multiply by inverse of this covariance """ - return tf.cholesky_solve(self.L, X) + return tf.linalg.cholesky_solve(self.L, X) class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): @@ -430,7 +431,7 @@ def __init__(self, size, invSigma=None): if invSigma is None: self.Linv_full = tf.Variable( - tf.random_normal([size, size], dtype=tf.float64), + tf.random.normal([size, size], dtype=tf.float64), name="Linv_full") else: self.Linv_full = 
tf.Variable( @@ -445,12 +446,12 @@ def __init__(self, size, invSigma=None): # it's positive. L_indeterminate = tf.linalg.band_part(self.Linv_full, -1, 0) self.Linv = tf.matrix_set_diag( - L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) + L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) ) @property def logdet(self): - return -2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Linv))) + return -2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(self.Linv))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -512,7 +513,7 @@ def __init__(self, sizes, Sigmas=None, mask=None): if Sigmas is None: self.L_full = [ tf.Variable( - tf.random_normal([sizes[i], sizes[i]], dtype=tf.float64), + tf.random.normal([sizes[i], sizes[i]], dtype=tf.float64), name="L" + str(i) + "_full", ) for i in range(self.nfactors) @@ -529,7 +530,7 @@ def __init__(self, sizes, Sigmas=None, mask=None): L_indeterminate = [tf.linalg.band_part( mat, -1, 0) for mat in self.L_full] self.L = [ - tf.matrix_set_diag(mat, tf.exp(tf.matrix_diag_part(mat))) + tf.matrix_set_diag(mat, tf.exp(tf.linalg.diag_part(mat))) for mat in L_indeterminate ] @@ -548,7 +549,8 @@ def logdet(self): for mat in self.L]) n_prod = tf.reduce_prod(n_list) logdet = tf.stack( - [tf.reduce_sum(tf.log(tf.diag_part(mat))) for mat in self.L] + [tf.reduce_sum(tf.math.log(tf.diag_part(mat))) + for mat in self.L] ) logdetfinal = tf.reduce_sum((logdet * n_prod) / n_list) else: @@ -558,7 +560,7 @@ def logdet(self): for i in range(self.nfactors): indices = list(range(self.nfactors)) indices.remove(i) - logdet += tf.log(tf.diag_part(self.L[i])) * tf.to_double( + logdet += tf.math.log(tf.diag_part(self.L[i])) * tf.to_double( tf.reduce_sum(mask_reshaped, indices) ) logdetfinal = tf.reduce_sum(logdet) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 0022d0347..26eb9ec64 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -53,7 +53,8 @@ def solve_det_marginal(x, sigma, A, Q): # of things we invert. 
This includes Q and Sigma, as well # as the "lemma factor" for lack of a better definition if logging.getLogger().isEnabledFor(logging.DEBUG): - logging.log("Printing diagnostics for solve_det_marginal") + logging.log(logging.DEBUG, + "Printing diagnostics for solve_det_marginal") A = tf.Print(A, [_condition(Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True))], "lemma_factor condition") @@ -64,13 +65,13 @@ def solve_det_marginal(x, sigma, A, Q): # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like # a schur complement by isn't, so we call it the "lemma factor" # since we use it in woodbury and matrix determinant lemmas - lemma_factor = tf.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), - transpose_a=True)) + lemma_factor = tf.linalg.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), + transpose_a=True)) logdet = ( Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) + + 2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(lemma_factor))) ) if logging.getLogger().isEnabledFor(logging.DEBUG): @@ -78,14 +79,14 @@ def solve_det_marginal(x, sigma, A, Q): logdet = tf.Print(logdet, [sigma.logdet], "sigma logdet") logdet = tf.Print( logdet, - [2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor)))], + [2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(lemma_factor)))], "iqf logdet", ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Qinv + A' Sigma^{-1} A)^{-1} A' Sigma^{-1} - prod_term = tf.cholesky_solve(lemma_factor, Atrp_Sinv) + prod_term = tf.linalg.cholesky_solve(lemma_factor, Atrp_Sinv) solve = tf.matmul( sigma.solve(scaled_I(1.0, sigma.size) - tf.matmul(A, prod_term)), x @@ -122,19 +123,19 @@ def solve_det_conditional(x, sigma, A, Q): """ # (Q - A' Sigma^{-1} A) - lemma_factor = tf.cholesky( + lemma_factor = tf.linalg.cholesky( Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True)) logdet = ( -Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(tf.log(tf.matrix_diag_part(lemma_factor))) + + 2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(lemma_factor))) ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Q - A' Sigma^{-1} A)^{-1} A' Sigma^{-1} - prod_term = tf.cholesky_solve(lemma_factor, Atrp_Sinv) + prod_term = tf.linalg.cholesky_solve(lemma_factor, Atrp_Sinv) solve = tf.matmul( sigma.solve(scaled_I(1.0, sigma.size) + tf.matmul(A, prod_term)), x @@ -167,14 +168,16 @@ def _mnorm_logp_internal( log2pi = 1.8378770664093453 if logging.getLogger().isEnabledFor(logging.DEBUG): - solve_row = tf.Print(solve_row, [tf.trace(solve_col)], "coltrace") - solve_row = tf.Print(solve_row, [tf.trace(solve_row)], "rowtrace") + solve_row = tf.Print( + solve_row, [tf.linalg.trace(solve_col)], "coltrace") + solve_row = tf.Print( + solve_row, [tf.linalg.trace(solve_row)], "rowtrace") solve_row = tf.Print(solve_row, [logdet_row], "logdet_row") solve_row = tf.Print(solve_row, [logdet_col], "logdet_col") denominator = (-rowsize * colsize * log2pi - colsize * logdet_row - rowsize * logdet_col) - numerator = -tf.trace(tf.matmul(solve_col, solve_row)) + numerator = -tf.linalg.trace(tf.matmul(solve_col, solve_row)) return 0.5 * (numerator + denominator) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 28ea0e616..adbe6ac81 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -6,7 +6,7 @@ import numpy as np from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row from tensorflow.contrib.opt import ScipyOptimizerInterface -import logging +import 
tensorflow.compat.v1.logging as tflog __all__ = ["MNRSA"] @@ -67,7 +67,7 @@ def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", self.Y = tf.placeholder(tf.float64, [self.n_T, self.n_V], name="Brain") self.X_0 = tf.Variable( - tf.random_normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0" + tf.random.normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0" ) self.train_variables = [self.X_0] @@ -117,9 +117,9 @@ def fit(self, X, y, structured_RSA_cov=None): self.naive_C_ = cov2corr(self.naive_U_) self.L_full = tf.Variable(naiveRSA_L, name="L_full", dtype="float64") - L_indeterminate = tf.matrix_band_part(self.L_full, -1, 0) + L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) self.L = tf.matrix_set_diag( - L_indeterminate, tf.exp(tf.matrix_diag_part(L_indeterminate)) + L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) ) self.train_variables.extend([self.L_full]) @@ -134,20 +134,14 @@ def fit(self, X, y, structured_RSA_cov=None): options=self.optCtrl, ) - if logging.getLogger().isEnabledFor(logging.INFO): - optimizer._packed_loss_grad = tf.Print( - optimizer._packed_loss_grad, - [tf.reduce_min(optimizer._packed_loss_grad)], - "mingrad", - ) - optimizer._packed_loss_grad = tf.Print( - optimizer._packed_loss_grad, - [tf.reduce_max(optimizer._packed_loss_grad)], - "maxgrad", - ) - optimizer._packed_loss_grad = tf.Print( - optimizer._packed_loss_grad, [self.logp()], "logp" - ) + logging_ops = [] + logging_ops.append(tf.print("min(grad): ", tf.reduce_min( + optimizer._packed_loss_grad), output_stream=tflog.info)) + logging_ops.append(tf.print("max(grad): ", tf.reduce_max( + optimizer._packed_loss_grad), output_stream=tflog.info)) + logging_ops.append( + tf.print("logp", self.logp(), output_stream=tflog.info)) + self.sess.run(logging_ops, feed_dict=feed_dict) optimizer.minimize(session=self.sess, feed_dict=feed_dict) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index cba1364c8..6c16ff47e 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -4,10 +4,10 @@ from brainiak.matnormal.matnormal_likelihoods import matnorm_logp from tensorflow.contrib.opt import ScipyOptimizerInterface -__all__ = ["MatnormRegression"] +__all__ = ["MatnormalRegression"] -class MatnormRegression(BaseEstimator): +class MatnormalRegression(BaseEstimator): """ This analysis allows maximum likelihood estimation of regression models in the presence of both spatial and temporal covariance. @@ -22,7 +22,7 @@ class MatnormRegression(BaseEstimator): Voxel noise covariance class following CovBase interface. optimizer : string, default="L-BFGS-B" Scipy optimizer to use. For other options, see "method" argument - in https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize + of scipy.optimize.minimize optCtrl: dict, default=None Additional arguments to pass to scipy.optimize.minimize. 
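For orientation, a usage sketch consistent with the regression tests later in this series; the data are random placeholders, and the optCtrl key is an assumption about scipy.optimize.minimize's options dict rather than anything the class requires:

import numpy as np
from brainiak.matnormal.covs import CovIdentity, CovDiagonal
from brainiak.matnormal.regression import MatnormalRegression

m, n, p = 100, 4, 25                 # TRs, regressors, voxels (arbitrary sizes)
X = np.random.normal(size=(m, n))    # design matrix
Y = np.random.normal(size=(m, p))    # fMRI data

time_cov = CovIdentity(size=m)       # identity covariance over time
space_cov = CovDiagonal(size=p)      # independent per-voxel variances

model = MatnormalRegression(time_cov=time_cov, space_cov=space_cov,
                            optimizer="L-BFGS-B",
                            optCtrl={"maxiter": 500})  # assumed scipy option key
model.fit(X, Y)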
diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index d7ab25aa5..30f8f18b4 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -26,7 +26,7 @@ def quad_form(x, y): def scaled_I(x, size): """ x * I_{size} """ - return tf.diag(tf.ones([size], dtype=tf.float64) * x) + return tf.linalg.tensor_diag(tf.ones([size], dtype=tf.float64) * x) def quad_form_trp(x, y): diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 8aef52a61..9fa9f9cca 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -6,7 +6,7 @@ CovUnconstrainedInvCholesky, CovDiagonal, ) -from brainiak.matnormal.regression import MatnormRegression +from brainiak.matnormal.regression import MatnormalRegression from brainiak.matnormal.utils import rmn import logging @@ -34,7 +34,7 @@ def test_matnorm_regression_unconstrained(): row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedCholesky(size=p) - model = MatnormRegression(time_cov=row_cov, space_cov=col_cov) + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) model.fit(X, Y) @@ -56,7 +56,7 @@ def test_matnorm_regression_unconstrainedprec(): row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedInvCholesky(size=p) - model = MatnormRegression(time_cov=row_cov, space_cov=col_cov) + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) model.fit(X, Y) @@ -78,7 +78,7 @@ def test_matnorm_regression_optimizerChoice(): row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedInvCholesky(size=p) - model = MatnormRegression(time_cov=row_cov, space_cov=col_cov, + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, optimizer="CG") model.fit(X, Y) @@ -102,7 +102,7 @@ def test_matnorm_regression_scaledDiag(): row_cov = CovIdentity(size=m) col_cov = CovDiagonal(size=p) - model = MatnormRegression(time_cov=row_cov, space_cov=col_cov) + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) model.fit(X, Y) From 1fedd0c562709c1a003bbfa150a576054c85cde9 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 2 Mar 2020 22:58:41 -0800 Subject: [PATCH 21/84] fix missing comma --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3cc4a4b9d..5323f031b 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,7 @@ def finalize_options(self): 'psutil', 'nibabel', 'typing', - 'tensorflow' + 'tensorflow', 'joblib', 'wheel', # See https://github.com/astropy/astropy-helpers/issues/501 ], From 87eb10dfb56f1552407f1f606d41bd793177fc99 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 2 Mar 2020 23:44:15 -0800 Subject: [PATCH 22/84] strict linter fixes --- brainiak/matnormal/matnormal_likelihoods.py | 23 ++++++++++---------- tests/matnormal/test_matnormal_regression.py | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 26eb9ec64..3e9100212 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -1,4 +1,5 @@ import tensorflow as tf +from tensorflow import linalg as tlinalg from .utils import scaled_I import logging @@ -65,13 +66,13 @@ def solve_det_marginal(x, sigma, A, Q): # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like # a schur complement by isn't, so we call it the "lemma factor" # since we use it in woodbury and matrix determinant lemmas - lemma_factor = 
tf.linalg.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), - transpose_a=True)) + lemma_factor = tlinalg.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), + transpose_a=True)) logdet = ( Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(lemma_factor))) + + 2 * tf.reduce_sum(tf.math.log(tlinalg.diag_part(lemma_factor))) ) if logging.getLogger().isEnabledFor(logging.DEBUG): @@ -79,14 +80,14 @@ def solve_det_marginal(x, sigma, A, Q): logdet = tf.Print(logdet, [sigma.logdet], "sigma logdet") logdet = tf.Print( logdet, - [2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(lemma_factor)))], + [2 * tf.reduce_sum(tf.math.log(tlinalg.diag_part(lemma_factor)))], "iqf logdet", ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Qinv + A' Sigma^{-1} A)^{-1} A' Sigma^{-1} - prod_term = tf.linalg.cholesky_solve(lemma_factor, Atrp_Sinv) + prod_term = tlinalg.cholesky_solve(lemma_factor, Atrp_Sinv) solve = tf.matmul( sigma.solve(scaled_I(1.0, sigma.size) - tf.matmul(A, prod_term)), x @@ -123,19 +124,19 @@ def solve_det_conditional(x, sigma, A, Q): """ # (Q - A' Sigma^{-1} A) - lemma_factor = tf.linalg.cholesky( + lemma_factor = tlinalg.cholesky( Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True)) logdet = ( -Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(lemma_factor))) + + 2 * tf.reduce_sum(tf.math.log(tlinalg.diag_part(lemma_factor))) ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) # (Q - A' Sigma^{-1} A)^{-1} A' Sigma^{-1} - prod_term = tf.linalg.cholesky_solve(lemma_factor, Atrp_Sinv) + prod_term = tlinalg.cholesky_solve(lemma_factor, Atrp_Sinv) solve = tf.matmul( sigma.solve(scaled_I(1.0, sigma.size) + tf.matmul(A, prod_term)), x @@ -169,15 +170,15 @@ def _mnorm_logp_internal( if logging.getLogger().isEnabledFor(logging.DEBUG): solve_row = tf.Print( - solve_row, [tf.linalg.trace(solve_col)], "coltrace") + solve_row, [tlinalg.trace(solve_col)], "coltrace") solve_row = tf.Print( - solve_row, [tf.linalg.trace(solve_row)], "rowtrace") + solve_row, [tlinalg.trace(solve_row)], "rowtrace") solve_row = tf.Print(solve_row, [logdet_row], "logdet_row") solve_row = tf.Print(solve_row, [logdet_col], "logdet_col") denominator = (-rowsize * colsize * log2pi - colsize * logdet_row - rowsize * logdet_col) - numerator = -tf.linalg.trace(tf.matmul(solve_col, solve_row)) + numerator = -tlinalg.trace(tf.matmul(solve_col, solve_row)) return 0.5 * (numerator + denominator) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 9fa9f9cca..975e53e53 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -79,7 +79,7 @@ def test_matnorm_regression_optimizerChoice(): col_cov = CovUnconstrainedInvCholesky(size=p) model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, - optimizer="CG") + optimizer="CG") model.fit(X, Y) From bc87a00c1650133bfe70592bc7d68eccd177c628 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 2 Mar 2020 23:45:09 -0800 Subject: [PATCH 23/84] need old TF for things to work --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5323f031b..3b83e90e9 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,7 @@ def finalize_options(self): 'psutil', 'nibabel', 'typing', - 'tensorflow', + 'tensorflow<=1.15', # brainiak.matnormal not compatible with tf2.0 'joblib', 'wheel', # See https://github.com/astropy/astropy-helpers/issues/501 
], From 05258b39d1bc6c9e4907495f8325beebe11fe464 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Tue, 3 Mar 2020 00:00:52 -0800 Subject: [PATCH 24/84] docstring formatting fix --- brainiak/matnormal/matnormal_likelihoods.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 3e9100212..9b95f8031 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -217,11 +217,10 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): """ Log likelihood for marginal centered matrix-variate normal density. - Given: .. math:: - X \\sim \\mathcal{MN}(0, Q, C)\\ - Y \\mid \\X \\sim \\mathcal{MN}(AX, R, C),\\ - Y \\sim \\mathcal{MN}(0, R + AQA, C) + X \\sim \\mathcal{MN}(0, Q, C)\\ + Y \\mid \\X \\sim \\mathcal{MN}(AX, R, C),\\ + Y \\sim \\mathcal{MN}(0, R + AQA, C) This function efficiently computes the marginals by unpacking some info in the covariance classes and then dispatching to solve_det_marginal. @@ -258,9 +257,9 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): Log likelihood for centered marginal matrix-variate normal density. .. math:: - X \\sim \\mathcal{MN}(0, R, Q)\\ - Y \\mid \\X \\sim \\mathcal{MN}(XA, R, C),\\ - Y \\sim \\mathcal{MN}(0, R, C + AQA) + X \\sim \\mathcal{MN}(0, R, Q)\\ + Y \\mid \\X \\sim \\mathcal{MN}(XA, R, C),\\ + Y \\sim \\mathcal{MN}(0, R, C + AQA) This function efficiently computes the marginals by unpacking some info in the covariance classes and then dispatching to solve_det_marginal. From 1a88cbd78e9e0f3a09c18dcc0f0e7bcd416fd79f Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Tue, 3 Mar 2020 21:04:21 -0800 Subject: [PATCH 25/84] fix bad merge --- brainiak/matnormal/covs.py | 16 +- brainiak/utils/utils.py | 314 ------------------------------------ tests/matnormal/test_cov.py | 20 ++- 3 files changed, 16 insertions(+), 334 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 2c55e52fd..38924323f 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -593,25 +593,23 @@ def __init__(self, base_cov, scale=1.0): self._baseCov = base_cov self._scale = scale - @define_scope + @property def logdet(self): """ log|Sigma| """ return self._baseCov.logdet + tf.log(self._scale) * self._baseCov.size - def Sigma_inv_x(self, X): + def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` """ - return self._baseCov.Sigma_inv_x(X) / self._scale + return self._baseCov.solve(X) / self._scale - @define_scope - def Sigma(self): + def _cov(self): """return Sigma """ - return self._baseCov.Sigma * self._scale - - @define_scope - def Sigma_inv(self): + return self._baseCov._cov * self._scale + + def _prec(self): """ Sigma^{-1}. Override me with more efficient implementation in subclasses """ diff --git a/brainiak/utils/utils.py b/brainiak/utils/utils.py index c0b11970d..47f403716 100644 --- a/brainiak/utils/utils.py +++ b/brainiak/utils/utils.py @@ -966,317 +966,7 @@ def array_correlation(x, y, axis=0): r : float or 1D ndarray Pearson correlation values for input variables """ -<<<<<<< HEAD -<<<<<<< HEAD - return p - - -def tf_solve_lower_triangular_kron(L, y): - """ Tensor flow function to solve L x = y - where L = kron(L[0], L[1] .. 
L[n-1]) - and L[i] are the lower triangular matrices - - Arguments - --------- - L : list of 2-D tensors - Each element of the list must be a tensorflow tensor and - must be a lower triangular matrix of dimension n_i x n_i - - y : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p - - Returns - ------- - x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p - - """ - n = len(L) - if n == 1: - return tf.matrix_triangular_solve(L[0], y) - else: - x = y - na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] - - for i in range(na): - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) - t = xinb / L[0][i, i] - xinb = tf_solve_lower_triangular_kron(L[1:], t) - xina = xina - tf.reshape( - tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), [1, nb*col]), - [(na-i-1)*nb, col]) * \ - tf.reshape( - tf.tile(tf.reshape(t, [-1, 1]), [na-i-1, 1]), - [(na-i-1)*nb, col]) - x = tf.concat(axis=0, values=[xt, xinb, xina]) - - return x - - -def tf_solve_upper_triangular_kron(L, y): - """ Tensor flow function to solve L^T x = y - where L = kron(L[0], L[1] .. L[n-1]) - and L[i] are the lower triangular matrices - - Arguments - --------- - L : list of 2-D tensors - Each element of the list must be a tensorflow tensor and - must be a lower triangular matrix of dimension n_i x n_i - - y : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p - - Returns - ------- - x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p - - """ - n = len(L) - if n == 1: - return tf.matrix_triangular_solve(L[0], y, adjoint=True) - else: - x = y - na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] - - for i in range(na-1, -1, -1): - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) - t = xinb / L[0][i, i] - xinb = tf_solve_upper_triangular_kron(L[1:], t) - xt = (xt - - tf.reshape( - tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), - [1, nb*col]), - [i*nb, col]) - * tf.reshape( - tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), [i*nb, col])) - x = tf.concat(axis=0, values=[xt, xinb, xina]) - - return x - - -def tf_kron_mult(L, x): - """ Tensorflow multiply with kronecker product matrix - Returs kron(L[0], L[1] ...) 
* x - - Arguments - --------- - L : list of 2-D tensors - Each element of the list must be a tensorflow tensor and - must be a square matrix of dimension n_i x n_i - - x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p - - Returns - ------- - y : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p - """ - n = len(L) - if n == 1: - return tf.matmul(L[0], x) - else: - na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] - xt = tf_kron_mult( - L[1:], - tf.transpose(tf.reshape(tf.transpose(x), [-1, nb]))) - y = tf.zeros_like(x) - for i in range(na): - ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) - yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), - tf.transpose(tf.slice(L[0], [i, 0], [1, na]))), - [nb, col]) - y = tf.concat(axis=0, values=[ya, yb, yc]) - return y - - -def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): - """ Tensor flow function to solve L x = y - where L is a lower triangular matrix with a mask - - Arguments - --------- - L : 2-D tensor - Must be a tensorflow tensor and - must be a triangular matrix of dimension n x n - - y : 1-D or 2-D tensor - Dimension n x p - - mask : 1-D tensor - Dimension n x 1, should be 1 if element is valid, 0 if invalid - - lower : boolean (default : True) - True if L is lower triangular, False if upper triangular - - adjoint : boolean (default : False) - True if solving for L^x = y, False if solving for Lx = y - - Returns - ------- - x : 1-D or 2-D tensor - Dimension n x p, values at rows for which mask == 0 are set to zero - - """ - - zero = tf.constant(0, dtype=tf.int32) - mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), - tf.reshape(mask, [1, -1])), zero)) - q = tf.to_int32(tf.sqrt(tf.to_double(tf.shape(mask_mat)[0]))) - L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) - - maskindex = tf.where(tf.not_equal(mask, zero)) - y_masked = tf.gather_nd(y, maskindex) - - x_s1 = tf.matrix_triangular_solve(L_masked, y_masked, - lower=lower, adjoint=adjoint) - x = tf.scatter_nd(maskindex, x_s1, tf.to_int64(tf.shape(y))) - return x - - -def tf_solve_lower_triangular_masked_kron(L, y, mask): - """ Tensor flow function to solve L x = y - where L = kron(L[0], L[1] .. 
L[n-1]) - and L[i] are the lower triangular matrices - - Arguments - --------- - L : list of 2-D tensors - Each element of the list must be a tensorflow tensor and - must be a lower triangular matrix of dimension n_i x n_i - - y : 1-D or 2-D tensor - Dimension [n_0*n_1*..n_(m-1)), p] - - mask: 1-D tensor - Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0 - for don't care - - Returns - ------- - x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p, values at rows - for which mask == 0 are set to zero - - """ - n = len(L) - if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, - lower=True, adjoint=False) - else: - x = y - na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] - - for i in range(na): - mask_b = tf.slice(mask, [i*nb], [nb]) - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) - t = xinb / L[0][i, i] - - if tf.reduce_sum(mask_b) != nb: - xinb = tf_solve_lower_triangular_masked_kron(L[1:], t, mask_b) - t_masked = tf_kron_mult(L[1:], xinb) - - else: - # all valid - same as no mask - xinb = tf_solve_lower_triangular_kron(L[1:], t) - t_masked = t - xina = (xina - - tf.reshape( - tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), - [1, nb*col]), - [(na-i-1)*nb, col]) - * tf.reshape( - tf.tile(tf.reshape(t_masked, [-1, 1]), [na-i-1, 1]), - [(na-i-1)*nb, col])) - - x = tf.concat(axis=0, values=[xt, xinb, xina]) - - return x - - -def tf_solve_upper_triangular_masked_kron(L, y, mask): - """ Tensor flow function to solve L^T x = y - where L = kron(L[0], L[1] .. L[n-1]) - and L[i] are the lower triangular matrices - - Arguments - --------- - L : list of 2-D tensors - Each element of the list must be a tensorflow tensor and - must be a lower triangular matrix of dimension n_i x n_i - - y : 1-D or 2-D tensor - Dimension [n_0*n_1*..n_(m-1)), p] - - mask: 1-D tensor - Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows - and 0 for don't care - - Returns - ------- - x : 1-D or 2-D tensor - Dimension (n_0*n_1*..n_(m-1)) x p, values at rows - for which mask == 0 are set to zero - - """ - n = len(L) - if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, - lower=True, adjoint=True) - else: - x = y - na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] - L1_end_tr = [tf.transpose(x) for x in L[1:]] - - for i in range(na-1, -1, -1): - mask_b = tf.slice(mask, [i*nb], [nb]) - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) - t = xinb / L[0][i, i] - - if tf.reduce_sum(mask_b) != nb: - xinb = tf_solve_upper_triangular_masked_kron(L[1:], t, mask_b) - t_masked = tf_kron_mult(L1_end_tr, xinb) - else: - xinb = tf_solve_upper_triangular_kron(L[1:], t) - t_masked = t - - xt = (xt - - tf.reshape( - tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), - [1, nb*col]), - [i*nb, col]) - * tf.reshape( - tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), - [i*nb, col])) - x = tf.concat(axis=0, values=[xt, xinb, xina]) - - return x -======= -======= ->>>>>>> matnormal-regression-rsa # Accommodate array-like inputs if not isinstance(x, np.ndarray): x = np.asarray(x) @@ -1304,7 +994,3 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): np.sum(y_demean ** 2, axis=0)) return numerator / denominator -<<<<<<< HEAD ->>>>>>> master -======= ->>>>>>> matnormal-regression-rsa diff --git 
a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index ad58a4f1b..2b307b389 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -1,6 +1,6 @@ import numpy as np from numpy.testing import assert_allclose -from scipy.stats import norm, wishart, invgamma +from scipy.stats import norm, wishart, invgamma, invwishart from brainiak.matnormal.covs import (CovIdentity, CovAR1, CovIsotropic, @@ -352,10 +352,10 @@ def test_CovAR1_scan_onsets(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) -<<<<<<< HEAD - assert_allclose(sinvx_np, cov.Sigma_inv_x(X_tf).eval(session=sess), + assert_allclose(sinvx_np, cov.solve(X_tf).eval(session=sess), rtol=rtol) + def test_CovScaleMixin(): base_cov = CovUnconstrainedCholesky(size=m) @@ -363,7 +363,6 @@ def test_CovScaleMixin(): scales = tf.constant(sc_np)*5 covs = [CovScaleMixin(base_cov, scales[j]) for j in range(5)] - with tf.Session() as sess: # initialize the random covariance sess.run(tf.variables_initializer(base_cov.get_optimize_vars())) @@ -371,12 +370,11 @@ def test_CovScaleMixin(): for j in range(5): # compute the naive version - cov_np = base_cov.Sigma.eval(session=sess) * scales[j].eval(session=sess) + cov_np = base_cov._cov.eval( + session=sess) * scales[j].eval(session=sess) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, covs[j].logdet.eval(session=sess), rtol=rtol, atol=atol) - assert_allclose(sinvx_np, covs[j].Sigma_inv_x(X_tf).eval(session=sess), rtol=rtol, atol=atol) -======= - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) ->>>>>>> matnormal-regression-rsa + assert_allclose(logdet_np, covs[j].logdet.eval( + session=sess), rtol=rtol, atol=atol) + assert_allclose(sinvx_np, covs[j].solve( + X_tf).eval(session=sess), rtol=rtol, atol=atol) From 06d68d4154ce4589808682039d5567b215f3f742 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sun, 28 Jun 2020 20:16:39 -0700 Subject: [PATCH 26/84] wip cleanup --- brainiak/matnormal/dpmnsrm.py | 48 ++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/brainiak/matnormal/dpmnsrm.py b/brainiak/matnormal/dpmnsrm.py index 1c7ddd85c..7bd5acfe3 100644 --- a/brainiak/matnormal/dpmnsrm.py +++ b/brainiak/matnormal/dpmnsrm.py @@ -1,15 +1,14 @@ import tensorflow as tf from pymanopt import Problem -from pymanopt.manifolds import Stiefel, Euclidean -from pymanopt.solvers import TrustRegions, ConjugateGradient +from pymanopt.manifolds import Stiefel +from pymanopt.solvers import ConjugateGradient from sklearn.base import BaseEstimator from brainiak.matnormal.covs import (CovIdentity, CovScaleMixin, - CovTFWrap, CovUnconstrainedCholesky) import numpy as np from brainiak.matnormal.matnormal_likelihoods import ( - matnorm_logp_marginal_col, matnorm_logp) + matnorm_logp_marginal_col) from tensorflow.contrib.opt import ScipyOptimizerInterface import logging @@ -56,33 +55,34 @@ def _eye(self, x): def _make_logp_op(self): """ MatnormSRM Log-likelihood""" subj_space_covs = [CovScaleMixin(base_cov=self.space_cov, - scale=1/self.rhoprec[j]) for j in range(self.n)] + scale=1/self.rhoprec[j]) for j in range(self.n)] if self.marg_cov_class is CovIdentity: return tf.reduce_sum( [matnorm_logp_marginal_col(self.X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=self.S, - marg_cov=CovIdentity(size=self.k)) + row_cov=subj_space_covs[j], + col_cov=self.time_cov,Î + marg=self.S, + 
marg_cov=CovIdentity(size=self.k)) for j in range(self.n)], name="lik_logp") elif self.marg_cov_class is CovUnconstrainedCholesky: return tf.reduce_sum( [matnorm_logp_marginal_col(self.X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=tf.matmul(self.marg_cov.L, self.S), - marg_cov=CovIdentity(size=self.k)) + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=tf.matmul( + self.marg_cov.L, self.S), + marg_cov=CovIdentity(size=self.k)) for j in range(self.n)], name="lik_logp") else: logger.warn("ECME with cov that is not identity or unconstrained may\ yield numerical instabilities! Use ECM for now.") return tf.reduce_sum( [matnorm_logp_marginal_col(self.X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=self.S, - marg_cov=self.marg_cov) + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=self.S, + marg_cov=self.marg_cov) for j in range(self.n)], name="lik_logp") def _make_Q_op(self): @@ -107,7 +107,9 @@ def _make_Q_op(self): for j in range(self.n)], 0)) if self.s_constraint == "gaussian": - s_quad_form = -tf.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(self.S)), self.S)) + s_quad_form = - \ + tf.trace(tf.matmul(self.time_cov.Sigma_inv_x( + tf.transpose(self.S)), self.S)) det_terms = -(self.v*self.n+self.k) * self.time_cov.logdet -\ (self.k+self.t)*self.n*self.space_cov.logdet +\ (self.k+self.t)*self.v*tf.reduce_sum(tf.log(self.rhoprec)) -\ @@ -122,8 +124,8 @@ def _make_Q_op(self): trace_prod = -tf.reduce_sum(self.rhoprec / self.rhoprec_prime) *\ tf.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ (tf.trace(tf.matmul(self.wcov_prime, self.marg_cov.Sigma_inv + - tf.matmul(self.S, self.time_cov.Sigma_inv_x( - tf.transpose(self.S)))))) + tf.matmul(self.S, self.time_cov.Sigma_inv_x( + tf.transpose(self.S)))))) return 0.5 * (det_terms + x_quad_form + @@ -152,7 +154,7 @@ def make_estep_ops(self): def make_mstep_b_op(self): return tf.expand_dims(tf.reduce_sum( [self.time_cov.Sigma_inv_x(tf.transpose(self.X[j] - - tf.matmul(self.w_prime[j], self.S))) + tf.matmul(self.w_prime[j], self.S))) for j in range(self.n)], 1) / tf.reduce_sum(self.time_cov.Sigma_inv), -1) @@ -212,7 +214,7 @@ def _init_vars(self, X): self.X = tf.constant(X, name="X") - xsvd = [np.linalg.svd(x)for x in X] + xsvd = [np.linalg.svd(x) for x in X] # parameters self.b = tf.Variable(np.random.normal(size=(self.n, self.v, 1)), @@ -378,7 +380,7 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): delQ = loss_end_mstep - loss_start if np.max(np.r_[w_norm, sigv_norm, - rhoprec_norm, delQ]) <= convergence_tol: + rhoprec_norm, delQ]) <= convergence_tol: converged = True break From c36242659194bc465b5e621a21e2aa82b4d8193c Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 29 Jun 2020 21:08:25 -0700 Subject: [PATCH 27/84] run tf1 -> tf2 conversion script --- brainiak/matnormal/covs.py | 32 +++++------ brainiak/matnormal/matnormal_likelihoods.py | 62 ++++++++++----------- brainiak/matnormal/mnrsa.py | 14 ++--- brainiak/matnormal/regression.py | 12 ++-- 4 files changed, 60 insertions(+), 60 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 29601aa93..e3e6833bb 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -186,7 +186,7 @@ def logdet(self): sigma = tf.exp(self.log_sigma) # now compute logdet return tf.reduce_sum( - 2 * tf.constant(self.run_sizes, dtype=tf.float64) * + input_tensor=2 * tf.constant(self.run_sizes, dtype=tf.float64) * tf.math.log(sigma) - tf.math.log(1 - tf.square(rho)) ) @@ 
-290,7 +290,7 @@ def __init__(self, size, diag_var=None): @property def logdet(self): - return -tf.reduce_sum(self.logprec) + return -tf.reduce_sum(input_tensor=self.logprec) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -322,7 +322,7 @@ def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): rate=tf.constant(beta, dtype=tf.float64), ) - self.logp = tf.reduce_sum(self.ig.log_prob(self.prec)) + self.logp = tf.reduce_sum(input_tensor=self.ig.log_prob(self.prec)) class CovUnconstrainedCholesky(CovBase): @@ -366,7 +366,7 @@ def __init__(self, size=None, Sigma=None): # it's positive. L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) - self.L = tf.matrix_set_diag( + self.L = tf.linalg.set_diag( L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) ) @@ -374,7 +374,7 @@ def __init__(self, size=None, Sigma=None): def logdet(self): # We save a log here by using the diag of L_full - return 2 * tf.reduce_sum((tf.linalg.diag_part(self.L_full))) + return 2 * tf.reduce_sum(input_tensor=(tf.linalg.diag_part(self.L_full))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -445,13 +445,13 @@ def __init__(self, size, invSigma=None): # Also: to make the parameterization unique we log the diagonal so # it's positive. L_indeterminate = tf.linalg.band_part(self.Linv_full, -1, 0) - self.Linv = tf.matrix_set_diag( + self.Linv = tf.linalg.set_diag( L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) ) @property def logdet(self): - return -2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(self.Linv))) + return -2 * tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.diag_part(self.Linv))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -530,7 +530,7 @@ def __init__(self, sizes, Sigmas=None, mask=None): L_indeterminate = [tf.linalg.band_part( mat, -1, 0) for mat in self.L_full] self.L = [ - tf.matrix_set_diag(mat, tf.exp(tf.linalg.diag_part(mat))) + tf.linalg.set_diag(mat, tf.exp(tf.linalg.diag_part(mat))) for mat in L_indeterminate ] @@ -545,25 +545,25 @@ def logdet(self): """ log|Sigma| using the diagonals of the cholesky factors. 
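        The unmasked case uses the standard Kronecker determinant identity:
        for factor sizes :math:`n_1, \\dots, n_m` with :math:`N = \\prod_i n_i`,

        .. math::
            \\log|\\Sigma| = 2 \\sum_{i=1}^m \\frac{N}{n_i} \\sum_j \\log (L_i)_{jj}

        The masked case replaces the :math:`N / n_i` weights with counts of
        unmasked entries along the remaining factors.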
""" if self.mask is None: - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) + n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in self.L]) - n_prod = tf.reduce_prod(n_list) + n_prod = tf.reduce_prod(input_tensor=n_list) logdet = tf.stack( - [tf.reduce_sum(tf.math.log(tf.diag_part(mat))) + [tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.tensor_diag_part(mat))) for mat in self.L] ) - logdetfinal = tf.reduce_sum((logdet * n_prod) / n_list) + logdetfinal = tf.reduce_sum(input_tensor=(logdet * n_prod) / n_list) else: - n_list = [tf.shape(mat)[0] for mat in self.L] + n_list = [tf.shape(input=mat)[0] for mat in self.L] mask_reshaped = tf.reshape(self.mask, n_list) logdet = 0.0 for i in range(self.nfactors): indices = list(range(self.nfactors)) indices.remove(i) - logdet += tf.math.log(tf.diag_part(self.L[i])) * tf.to_double( - tf.reduce_sum(mask_reshaped, indices) + logdet += tf.math.log(tf.linalg.tensor_diag_part(self.L[i])) * tf.cast( + tf.reduce_sum(input_tensor=mask_reshaped, axis=indices), dtype=tf.float64 ) - logdetfinal = tf.reduce_sum(logdet) + logdetfinal = tf.reduce_sum(input_tensor=logdet) return 2.0 * logdetfinal def solve(self, X): diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 9b95f8031..0adf07fa3 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -20,8 +20,8 @@ def _condition(X): Symmetric tensor to compute condition number of """ - s = tf.svd(X, compute_uv=False) - return tf.reduce_max(s) / tf.reduce_min(s) + s = tf.linalg.svd(X, compute_uv=False) + return tf.reduce_max(input_tensor=s) / tf.reduce_min(input_tensor=s) def solve_det_marginal(x, sigma, A, Q): @@ -56,12 +56,12 @@ def solve_det_marginal(x, sigma, A, Q): if logging.getLogger().isEnabledFor(logging.DEBUG): logging.log(logging.DEBUG, "Printing diagnostics for solve_det_marginal") - A = tf.Print(A, [_condition(Q._prec + tf.matmul(A, sigma.solve(A), + A = tf.compat.v1.Print(A, [_condition(Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True))], "lemma_factor condition") - A = tf.Print(A, [_condition(Q._cov)], "Q condition") - A = tf.Print(A, [_condition(sigma._cov)], "sigma condition") - A = tf.Print(A, [tf.reduce_max(A), tf.reduce_min(A)], "A minmax") + A = tf.compat.v1.Print(A, [_condition(Q._cov)], "Q condition") + A = tf.compat.v1.Print(A, [_condition(sigma._cov)], "sigma condition") + A = tf.compat.v1.Print(A, [tf.reduce_max(input_tensor=A), tf.reduce_min(input_tensor=A)], "A minmax") # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like # a schur complement by isn't, so we call it the "lemma factor" @@ -72,15 +72,15 @@ def solve_det_marginal(x, sigma, A, Q): logdet = ( Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(tf.math.log(tlinalg.diag_part(lemma_factor))) + + 2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor))) ) if logging.getLogger().isEnabledFor(logging.DEBUG): - logdet = tf.Print(logdet, [Q.logdet], "Q logdet") - logdet = tf.Print(logdet, [sigma.logdet], "sigma logdet") - logdet = tf.Print( + logdet = tf.compat.v1.Print(logdet, [Q.logdet], "Q logdet") + logdet = tf.compat.v1.Print(logdet, [sigma.logdet], "sigma logdet") + logdet = tf.compat.v1.Print( logdet, - [2 * tf.reduce_sum(tf.math.log(tlinalg.diag_part(lemma_factor)))], + [2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor)))], "iqf logdet", ) @@ -130,7 +130,7 @@ def solve_det_conditional(x, sigma, A, Q): logdet = ( -Q.logdet + sigma.logdet - + 
2 * tf.reduce_sum(tf.math.log(tlinalg.diag_part(lemma_factor))) + + 2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor))) ) # A' Sigma^{-1} @@ -169,12 +169,12 @@ def _mnorm_logp_internal( log2pi = 1.8378770664093453 if logging.getLogger().isEnabledFor(logging.DEBUG): - solve_row = tf.Print( + solve_row = tf.compat.v1.Print( solve_row, [tlinalg.trace(solve_col)], "coltrace") - solve_row = tf.Print( + solve_row = tf.compat.v1.Print( solve_row, [tlinalg.trace(solve_row)], "rowtrace") - solve_row = tf.Print(solve_row, [logdet_row], "logdet_row") - solve_row = tf.Print(solve_row, [logdet_col], "logdet_col") + solve_row = tf.compat.v1.Print(solve_row, [logdet_row], "logdet_row") + solve_row = tf.compat.v1.Print(solve_row, [logdet_col], "logdet_col") denominator = (-rowsize * colsize * log2pi - colsize * logdet_row - rowsize * logdet_col) @@ -197,11 +197,11 @@ def matnorm_logp(x, row_cov, col_cov): """ - rowsize = tf.cast(tf.shape(x)[0], "float64") - colsize = tf.cast(tf.shape(x)[1], "float64") + rowsize = tf.cast(tf.shape(input=x)[0], "float64") + colsize = tf.cast(tf.shape(input=x)[1], "float64") # precompute sigma_col^{-1} * x' - solve_col = col_cov.solve(tf.transpose(x)) + solve_col = col_cov.solve(tf.transpose(a=x)) logdet_col = col_cov.logdet # precompute sigma_row^{-1} * x @@ -239,10 +239,10 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): Prior covariance implementing the CovBase API """ - rowsize = tf.cast(tf.shape(x)[0], "float64") - colsize = tf.cast(tf.shape(x)[1], "float64") + rowsize = tf.cast(tf.shape(input=x)[0], "float64") + colsize = tf.cast(tf.shape(input=x)[1], "float64") - solve_col = col_cov.solve(tf.transpose(x)) + solve_col = col_cov.solve(tf.transpose(a=x)) logdet_col = col_cov.logdet solve_row, logdet_row = solve_det_marginal(x, row_cov, marg, marg_cov) @@ -278,14 +278,14 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): Prior covariance implementing the CovBase API """ - rowsize = tf.cast(tf.shape(x)[0], "float64") - colsize = tf.cast(tf.shape(x)[1], "float64") + rowsize = tf.cast(tf.shape(input=x)[0], "float64") + colsize = tf.cast(tf.shape(input=x)[1], "float64") solve_row = row_cov.solve(x) logdet_row = row_cov.logdet solve_col, logdet_col = solve_det_marginal( - tf.transpose(x), col_cov, tf.transpose(marg), marg_cov + tf.transpose(a=x), col_cov, tf.transpose(a=marg), marg_cov ) return _mnorm_logp_internal( @@ -297,10 +297,10 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): """ """ - rowsize = tf.cast(tf.shape(x)[0], "float64") - colsize = tf.cast(tf.shape(x)[1], "float64") + rowsize = tf.cast(tf.shape(input=x)[0], "float64") + colsize = tf.cast(tf.shape(input=x)[1], "float64") - solve_col = col_cov.solve(tf.transpose(x)) + solve_col = col_cov.solve(tf.transpose(a=x)) logdet_col = col_cov.logdet solve_row, logdet_row = solve_det_conditional(x, row_cov, cond, cond_cov) @@ -322,14 +322,14 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): This method exploits the matrix inversion and determinant lemmas to construct S - APA' given the covariance API in in CovBase. 
""" - rowsize = tf.cast(tf.shape(x)[0], "float64") - colsize = tf.cast(tf.shape(x)[1], "float64") + rowsize = tf.cast(tf.shape(input=x)[0], "float64") + colsize = tf.cast(tf.shape(input=x)[1], "float64") solve_row = row_cov.solve(x) logdet_row = row_cov.logdet solve_col, logdet_col = solve_det_conditional( - tf.transpose(x), col_cov, tf.transpose(cond), cond_cov + tf.transpose(a=x), col_cov, tf.transpose(a=cond), cond_cov ) return _mnorm_logp_internal( diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index adbe6ac81..07bea456f 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -63,8 +63,8 @@ def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", self.optCtrl, self.optMethod = optCtrl, optimizer # placeholders for inputs - self.X = tf.placeholder(tf.float64, [self.n_T, None], name="Design") - self.Y = tf.placeholder(tf.float64, [self.n_T, self.n_V], name="Brain") + self.X = tf.compat.v1.placeholder(tf.float64, [self.n_T, None], name="Design") + self.Y = tf.compat.v1.placeholder(tf.float64, [self.n_T, self.n_V], name="Brain") self.X_0 = tf.Variable( tf.random.normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0" @@ -79,7 +79,7 @@ def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", self.train_variables.extend(self.space_cov.get_optimize_vars()) # create a tf session we reuse for this object - self.sess = tf.Session() + self.sess = tf.compat.v1.Session() def fit(self, X, y, structured_RSA_cov=None): """ Estimate dimension reduction and cognitive model parameters @@ -118,14 +118,14 @@ def fit(self, X, y, structured_RSA_cov=None): self.L_full = tf.Variable(naiveRSA_L, name="L_full", dtype="float64") L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) - self.L = tf.matrix_set_diag( + self.L = tf.linalg.set_diag( L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) ) self.train_variables.extend([self.L_full]) self.x_stack = tf.concat([tf.matmul(self.X, self.L), self.X_0], 1) - self.sess.run(tf.global_variables_initializer(), feed_dict=feed_dict) + self.sess.run(tf.compat.v1.global_variables_initializer(), feed_dict=feed_dict) optimizer = ScipyOptimizerInterface( -self.logp(), @@ -136,9 +136,9 @@ def fit(self, X, y, structured_RSA_cov=None): logging_ops = [] logging_ops.append(tf.print("min(grad): ", tf.reduce_min( - optimizer._packed_loss_grad), output_stream=tflog.info)) + input_tensor=optimizer._packed_loss_grad), output_stream=tflog.info)) logging_ops.append(tf.print("max(grad): ", tf.reduce_max( - optimizer._packed_loss_grad), output_stream=tflog.info)) + input_tensor=optimizer._packed_loss_grad), output_stream=tflog.info)) logging_ops.append( tf.print("logp", self.logp(), output_stream=tflog.info)) self.sess.run(logging_ops, feed_dict=feed_dict) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 6c16ff47e..3fd9bb6ac 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -38,12 +38,12 @@ def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", self.n_t = time_cov.size self.n_v = space_cov.size - self.Y = tf.placeholder(tf.float64, [self.n_t, self.n_v], name="Y") + self.Y = tf.compat.v1.placeholder(tf.float64, [self.n_t, self.n_v], name="Y") - self.X = tf.placeholder(tf.float64, [self.n_t, None], name="X") + self.X = tf.compat.v1.placeholder(tf.float64, [self.n_t, None], name="X") # create a tf session we reuse for this object - self.sess = tf.Session() + self.sess = tf.compat.v1.Session() # @define_scope def 
logp(self): @@ -67,7 +67,7 @@ def fit(self, X, y): self.n_c = X.shape[1] feed_dict = {self.X: X, self.Y: y} - self.sess.run(tf.global_variables_initializer(), feed_dict=feed_dict) + self.sess.run(tf.compat.v1.global_variables_initializer(), feed_dict=feed_dict) # initialize to the least squares solution (basically all # we need now is the cov) @@ -87,7 +87,7 @@ def fit(self, X, y): self.train_variables.extend(self.time_cov.get_optimize_vars()) self.train_variables.extend(self.space_cov.get_optimize_vars()) - self.sess.run(tf.variables_initializer([self.beta])) + self.sess.run(tf.compat.v1.variables_initializer([self.beta])) optimizer = ScipyOptimizerInterface( -self.logp(), @@ -132,7 +132,7 @@ def calibrate(self, Y): ) # Sigma_s^{-1} B' - Sigma_s_btrp = self.space_cov.solve(tf.transpose(self.beta)) + Sigma_s_btrp = self.space_cov.solve(tf.transpose(a=self.beta)) # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) # (B Sigma_s^{-1} B')^{-1} From 16bf442307a98042d2d29fcd89ccd4da18ec9be9 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 29 Jun 2020 21:14:52 -0700 Subject: [PATCH 28/84] run v1 -> v2 conversion on tests --- tests/matnormal/test_cov.py | 56 +++++++++---------- tests/matnormal/test_matnormal_logp.py | 8 +-- .../test_matnormal_logp_conditional.py | 8 +-- .../matnormal/test_matnormal_logp_marginal.py | 8 +-- 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 22e216da5..388fa85d8 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -68,9 +68,9 @@ def test_CovConstant(): cov_np = wishart.rvs(df=m + 2, scale=np.eye(m)) cov = CovUnconstrainedCholesky(Sigma=cov_np) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # verify what we pass is what we get cov_tf = cov._cov.eval(session=sess) @@ -88,9 +88,9 @@ def test_CovIdentity(): cov = CovIdentity(size=m) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version cov_np = np.eye(m) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) @@ -104,9 +104,9 @@ def test_CovIsotropic(): cov = CovIsotropic(size=m) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version cov_np = cov._cov.eval(session=sess) * np.eye(cov.size) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) @@ -120,9 +120,9 @@ def test_CovDiagonal(): cov = CovDiagonal(size=m) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version cov_np = np.diag(1 / cov.prec.eval(session=sess)) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) @@ -137,9 +137,9 @@ def test_CovDiagonal_initialized(): cov_np = np.diag(np.exp(np.random.normal(size=m))) cov = CovDiagonal(size=m, diag_var=np.diag(cov_np)) - with tf.Session() as sess: + with 
tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) @@ -156,9 +156,9 @@ def test_CovDiagonalGammaPrior(): ig = invgamma(1.5, scale=1e-10) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) penalty_np = np.sum(ig.logpdf(1 / np.diag(cov_np))) @@ -173,9 +173,9 @@ def test_CovUnconstrainedCholesky(): cov = CovUnconstrainedCholesky(size=m) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version L = cov.L.eval(session=sess) cov_np = L @ L.T @@ -190,9 +190,9 @@ def test_CovUnconstrainedCholeskyWishartReg(): cov = CovUnconstrainedCholeskyWishartReg(size=m) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version L = cov.L.eval(session=sess) @@ -213,9 +213,9 @@ def test_CovUnconstrainedInvCholesky(): init = invwishart.rvs(scale=np.eye(m), df=m + 2) cov = CovUnconstrainedInvCholesky(size=m, invSigma=init) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version Linv = cov.Linv.eval(session=sess) L = np.linalg.inv(Linv) @@ -239,9 +239,9 @@ def test_Cov2FactorKron(): cov = CovKroneckerFactored(sizes=[dim1, dim2]) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) @@ -262,9 +262,9 @@ def test_Cov3FactorKron(): dim3 = 2 cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3]) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) @@ -295,9 +295,9 @@ def test_Cov3FactorMaskedKron(): cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3], mask=mask) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version L1 = (cov.L[0]).eval(session=sess) L2 = (cov.L[1]).eval(session=sess) @@ -329,9 +329,9 @@ def test_CovAR1(): cov = CovAR1(size=m) - with tf.Session() as sess: + with 
tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version cov_np = np.linalg.inv(cov.solve(eye).eval(session=sess)) @@ -345,9 +345,9 @@ def test_CovAR1_scan_onsets(): cov = CovAR1(size=m, scan_onsets=[0, m // 2]) - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: # initialize the random covariance - sess.run(tf.variables_initializer(cov.get_optimize_vars())) + sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) # compute the naive version cov_np = np.linalg.inv(cov.solve(eye).eval(session=sess)) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index 23118e576..ed3060a81 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -20,14 +20,14 @@ def test_against_scipy_mvn_row(): - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: rowcov = CovUnconstrainedCholesky(size=m) colcov = CovIdentity(size=n) X = rmn(np.eye(m), np.eye(n)) X_tf = tf.constant(X, "float64") - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) rowcov_np = rowcov._cov.eval(session=sess) @@ -39,14 +39,14 @@ def test_against_scipy_mvn_row(): def test_against_scipy_mvn_col(): - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: rowcov = CovIdentity(size=m) colcov = CovUnconstrainedCholesky(size=n) X = rmn(np.eye(m), np.eye(n)) X_tf = tf.constant(X, "float64") - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) colcov_np = colcov._cov.eval(session=sess) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 8b83e0828..5865b1da4 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -40,9 +40,9 @@ def test_against_scipy_mvn_row_conditional(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) Q_np = Q._cov.eval(session=sess) @@ -73,9 +73,9 @@ def test_against_scipy_mvn_col_conditional(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) Q_np = Q._cov.eval(session=sess) diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index bbb219671..a81956a64 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -34,9 +34,9 @@ def test_against_scipy_mvn_row_marginal(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: - sess.run(tf.global_variables_initializer()) + sess.run(tf.compat.v1.global_variables_initializer()) Q_np = Q._cov.eval(session=sess) @@ -62,9 +62,9 @@ def test_against_scipy_mvn_col_marginal(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.Session() as sess: + with tf.compat.v1.Session() as sess: - sess.run(tf.global_variables_initializer()) + 
sess.run(tf.compat.v1.global_variables_initializer()) Q_np = Q._cov.eval(session=sess) From e4f28d681e1c34ee58b07c1dd58494534488b9e1 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 29 Jun 2020 21:22:44 -0700 Subject: [PATCH 29/84] tf 1 -> 2 on solvers --- brainiak/utils/kronecker_solvers.py | 68 ++++++++++++++--------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py index c3b4752cf..ce32c12db 100644 --- a/brainiak/utils/kronecker_solvers.py +++ b/brainiak/utils/kronecker_solvers.py @@ -28,14 +28,14 @@ def tf_solve_lower_triangular_kron(L, y): """ n = len(L) if n == 1: - return tf.matrix_triangular_solve(L[0], y) + return tf.linalg.triangular_solve(L[0], y) else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] + n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) + nb = tf.cast(n_prod/na, dtype=tf.int32) + col = tf.shape(input=x)[1] for i in range(na): xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) @@ -74,14 +74,14 @@ def tf_solve_upper_triangular_kron(L, y): """ n = len(L) if n == 1: - return tf.matrix_triangular_solve(L[0], y, adjoint=True) + return tf.linalg.triangular_solve(L[0], y, adjoint=True) else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] + n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) + nb = tf.cast(n_prod/na, dtype=tf.int32) + col = tf.shape(input=x)[1] for i in range(na-1, -1, -1): xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) @@ -89,7 +89,7 @@ def tf_solve_upper_triangular_kron(L, y): xinb = tf_solve_upper_triangular_kron(L[1:], t) xt = (xt - tf.reshape( - tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), + tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), [1, nb*col]), [i*nb, col]) * tf.reshape( @@ -122,18 +122,18 @@ def tf_kron_mult(L, x): return tf.matmul(L[0], x) else: na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] + n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) + nb = tf.cast(n_prod/na, dtype=tf.int32) + col = tf.shape(input=x)[1] xt = tf_kron_mult( L[1:], - tf.transpose(tf.reshape(tf.transpose(x), [-1, nb]))) + tf.transpose(a=tf.reshape(tf.transpose(a=x), [-1, nb]))) y = tf.zeros_like(x) for i in range(na): ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), - tf.transpose(tf.slice(L[0], + tf.transpose(a=tf.slice(L[0], [i, 0], [1, na]))), [nb, col]) @@ -171,18 +171,18 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): """ zero = tf.constant(0, dtype=tf.int32) - mask_mat = tf.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), + mask_mat = tf.compat.v1.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), tf.reshape(mask, [1, -1])), zero)) - q = 
tf.to_int32(tf.sqrt(tf.to_double(tf.shape(mask_mat)[0]))) + q = tf.cast(tf.sqrt(tf.cast(tf.shape(input=mask_mat)[0], dtype=tf.float64)), dtype=tf.int32) L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) - maskindex = tf.where(tf.not_equal(mask, zero)) + maskindex = tf.compat.v1.where(tf.not_equal(mask, zero)) y_masked = tf.gather_nd(y, maskindex) - x_s1 = tf.matrix_triangular_solve(L_masked, y_masked, + x_s1 = tf.linalg.triangular_solve(L_masked, y_masked, lower=lower, adjoint=adjoint) - x = tf.scatter_nd(maskindex, x_s1, tf.to_int64(tf.shape(y))) + x = tf.scatter_nd(maskindex, x_s1, tf.cast(tf.shape(input=y), dtype=tf.int64)) return x @@ -219,17 +219,17 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] + n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) + nb = tf.cast(n_prod/na, dtype=tf.int32) + col = tf.shape(input=x)[1] for i in range(na): mask_b = tf.slice(mask, [i*nb], [nb]) xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) t = xinb / L[0][i, i] - if tf.reduce_sum(mask_b) != nb: + if tf.reduce_sum(input_tensor=mask_b) != nb: xinb = tf_solve_lower_triangular_masked_kron(L[1:], t, mask_b) t_masked = tf_kron_mult(L[1:], xinb) @@ -283,18 +283,18 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.to_double(tf.shape(mat)[0]) for mat in L]) - n_prod = tf.to_int32(tf.reduce_prod(n_list)) - nb = tf.to_int32(n_prod/na) - col = tf.shape(x)[1] - L1_end_tr = [tf.transpose(x) for x in L[1:]] + n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) + nb = tf.cast(n_prod/na, dtype=tf.int32) + col = tf.shape(input=x)[1] + L1_end_tr = [tf.transpose(a=x) for x in L[1:]] for i in range(na-1, -1, -1): mask_b = tf.slice(mask, [i*nb], [nb]) xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) t = xinb / L[0][i, i] - if tf.reduce_sum(mask_b) != nb: + if tf.reduce_sum(input_tensor=mask_b) != nb: xinb = tf_solve_upper_triangular_masked_kron(L[1:], t, mask_b) t_masked = tf_kron_mult(L1_end_tr, xinb) else: @@ -303,7 +303,7 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): xt = (xt - tf.reshape( - tf.tile(tf.transpose(tf.slice(L[0], [i, 0], [1, i])), + tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), [1, nb*col]), [i*nb, col]) * tf.reshape( From dea4d1002aceddca779a009aad254b9b2bfd1fbe Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 29 Jun 2020 21:23:25 -0700 Subject: [PATCH 30/84] test_cov passes --- brainiak/matnormal/covs.py | 11 ++++++----- setup.py | 3 ++- tests/matnormal/test_cov.py | 7 ++++++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index e3e6833bb..9b28d10f3 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -3,7 +3,8 @@ import abc import scipy.linalg import scipy.sparse -from tensorflow.contrib.distributions import InverseGamma, WishartCholesky +import tensorflow_probability as tfp + from brainiak.matnormal.utils import x_tx, xx_t from brainiak.utils.kronecker_solvers import ( tf_solve_lower_triangular_kron, @@ -317,9 +318,9 @@ class 
CovDiagonalGammaPrior(CovDiagonal): def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): super(CovDiagonalGammaPrior, self).__init__(size, sigma) - self.ig = InverseGamma( + self.ig = tfp.distributions.InverseGamma( concentration=tf.constant(alpha, dtype=tf.float64), - rate=tf.constant(beta, dtype=tf.float64), + scale=tf.constant(beta, dtype=tf.float64), ) self.logp = tf.reduce_sum(input_tensor=self.ig.log_prob(self.prec)) @@ -410,9 +411,9 @@ class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): def __init__(self, size, Sigma=None): super(CovUnconstrainedCholeskyWishartReg, self).__init__(size) - self.wishartReg = WishartCholesky( + self.wishartReg = tfp.distributions.WishartTriL( df=tf.constant(size + 2, dtype=tf.float64), - scale=tf.constant(1e5 * np.eye(size), dtype=tf.float64), + scale_tril=tf.constant(1e5 * np.eye(size), dtype=tf.float64), ) Sigma = xx_t(self.L) diff --git a/setup.py b/setup.py index 3b83e90e9..86a0dc9aa 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,8 @@ def finalize_options(self): 'psutil', 'nibabel', 'typing', - 'tensorflow<=1.15', # brainiak.matnormal not compatible with tf2.0 + 'tensorflow', + 'tensorflow_probability', 'joblib', 'wheel', # See https://github.com/astropy/astropy-helpers/issues/501 ], diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 388fa85d8..06e00d1b1 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -1,6 +1,10 @@ import numpy as np from numpy.testing import assert_allclose from scipy.stats import norm, wishart, invgamma, invwishart + +import tensorflow as tf +tf.compat.v1.disable_eager_execution() + from brainiak.matnormal.covs import ( CovIdentity, CovAR1, @@ -12,10 +16,11 @@ CovUnconstrainedInvCholesky, CovKroneckerFactored, ) -import tensorflow as tf import pytest import logging + + logging.basicConfig(level=logging.DEBUG) # X is m x n, so A sould be m x p From a5b2c7efe4653fa1e1f8282bcf9d14ccc464a265 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 29 Jun 2020 21:25:02 -0700 Subject: [PATCH 31/84] logp tests pass --- tests/matnormal/test_matnormal_logp.py | 2 ++ tests/matnormal/test_matnormal_logp_conditional.py | 2 ++ tests/matnormal/test_matnormal_logp_marginal.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index ed3060a81..dd2ad46b1 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -2,6 +2,8 @@ from numpy.testing import assert_allclose from scipy.stats import multivariate_normal import tensorflow as tf +tf.compat.v1.disable_eager_execution() + from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import matnorm_logp from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 5865b1da4..977136030 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -2,6 +2,8 @@ from numpy.testing import assert_allclose from scipy.stats import wishart, multivariate_normal import tensorflow as tf +tf.compat.v1.disable_eager_execution() + from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import ( matnorm_logp_conditional_col, diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index 
a81956a64..532081efa 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -2,6 +2,8 @@ from numpy.testing import assert_allclose from scipy.stats import multivariate_normal import tensorflow as tf +tf.compat.v1.disable_eager_execution() + from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import ( matnorm_logp_marginal_col, From d82a1b661a1ff898e7d69b9a20846138320f4ee6 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 27 Jul 2020 15:46:15 -0700 Subject: [PATCH 32/84] cov tests pass eager --- tests/matnormal/test_cov.py | 304 +++++++++++++++--------------------- 1 file changed, 126 insertions(+), 178 deletions(-) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 06e00d1b1..6782998b7 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -3,7 +3,6 @@ from scipy.stats import norm, wishart, invgamma, invwishart import tensorflow as tf -tf.compat.v1.disable_eager_execution() from brainiak.matnormal.covs import ( CovIdentity, @@ -73,68 +72,55 @@ def test_CovConstant(): cov_np = wishart.rvs(df=m + 2, scale=np.eye(m)) cov = CovUnconstrainedCholesky(Sigma=cov_np) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) + # verify what we pass is what we get + cov_tf = cov._cov + assert_allclose(cov_tf, cov_np) - # verify what we pass is what we get - cov_tf = cov._cov.eval(session=sess) - assert_allclose(cov_tf, cov_np) - - # compute the naive version - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + # compute the naive version + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovIdentity(): cov = CovIdentity(size=m) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - cov_np = np.eye(m) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + # compute the naive version + cov_np = np.eye(m) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovIsotropic(): cov = CovIsotropic(size=m) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - cov_np = cov._cov.eval(session=sess) * np.eye(cov.size) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + # compute the naive 
version + cov_np = cov._cov * np.eye(cov.size) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovDiagonal(): cov = CovDiagonal(size=m) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - cov_np = np.diag(1 / cov.prec.eval(session=sess)) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + # compute the naive version + cov_np = np.diag(1 / cov.prec) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovDiagonal_initialized(): @@ -142,15 +128,12 @@ def test_CovDiagonal_initialized(): cov_np = np.diag(np.exp(np.random.normal(size=m))) cov = CovDiagonal(size=m, diag_var=np.diag(cov_np)) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + # compute the naive version + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovDiagonalGammaPrior(): @@ -161,56 +144,44 @@ def test_CovDiagonalGammaPrior(): ig = invgamma(1.5, scale=1e-10) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - penalty_np = np.sum(ig.logpdf(1 / np.diag(cov_np))) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) - assert_allclose(penalty_np, cov.logp.eval(session=sess), rtol=rtol) + # compute the naive version + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + penalty_np = np.sum(ig.logpdf(1 / np.diag(cov_np))) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) + assert_allclose(penalty_np, cov.logp, rtol=rtol) def test_CovUnconstrainedCholesky(): cov = CovUnconstrainedCholesky(size=m) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - L = cov.L.eval(session=sess) - cov_np = L @ L.T - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, 
cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + L = cov.L.numpy() + cov_np = L @ L.T + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovUnconstrainedCholeskyWishartReg(): cov = CovUnconstrainedCholeskyWishartReg(size=m) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - - L = cov.L.eval(session=sess) - cov_np = L @ L.T + L = cov.L.numpy() + cov_np = L @ L.T - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) - # now compute the regularizer - reg = wishart.logpdf(cov_np, df=m + 2, scale=1e10 * np.eye(m)) - assert_allclose(reg, cov.logp.eval(session=sess), rtol=rtol) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) + # now compute the regularizer + reg = wishart.logpdf(cov_np, df=m + 2, scale=1e10 * np.eye(m)) + assert_allclose(reg, cov.logp, rtol=rtol) def test_CovUnconstrainedInvCholesky(): @@ -218,19 +189,15 @@ def test_CovUnconstrainedInvCholesky(): init = invwishart.rvs(scale=np.eye(m), df=m + 2) cov = CovUnconstrainedInvCholesky(size=m, invSigma=init) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - Linv = cov.Linv.eval(session=sess) - L = np.linalg.inv(Linv) - cov_np = L @ L.T + Linv = cov.Linv + L = np.linalg.inv(Linv) + cov_np = L @ L.T - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_Cov2FactorKron(): @@ -244,20 +211,16 @@ def test_Cov2FactorKron(): cov = CovKroneckerFactored(sizes=[dim1, dim2]) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - L1 = (cov.L[0]).eval(session=sess) - L2 = (cov.L[1]).eval(session=sess) - cov_np = np.kron(np.dot(L1, L1.transpose()), - np.dot(L2, L2.transpose())) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + L1 = (cov.L[0]).numpy() + L2 = (cov.L[1]).numpy() + cov_np = np.kron(np.dot(L1, L1.transpose()), + np.dot(L2, L2.transpose())) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + assert_allclose(logdet_np, 
cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_Cov3FactorKron(): @@ -267,23 +230,20 @@ def test_Cov3FactorKron(): dim3 = 2 cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3]) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - L1 = (cov.L[0]).eval(session=sess) - L2 = (cov.L[1]).eval(session=sess) - L3 = (cov.L[2]).eval(session=sess) - cov_np = np.kron( - np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())), - np.dot(L3, L3.transpose()), - ) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinv_np, cov.solve(eye).eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + + L1 = (cov.L[0]).numpy() + L2 = (cov.L[1]).numpy() + L3 = (cov.L[2]).numpy() + cov_np = np.kron( + np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())), + np.dot(L3, L3.transpose()), + ) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_Cov3FactorMaskedKron(): @@ -300,63 +260,51 @@ def test_Cov3FactorMaskedKron(): cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3], mask=mask) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - L1 = (cov.L[0]).eval(session=sess) - L2 = (cov.L[1]).eval(session=sess) - L3 = (cov.L[2]).eval(session=sess) - cov_np_factor = np.kron(L1, np.kron(L2, L3))[ - np.ix_(mask_indices, mask_indices)] - cov_np = np.dot(cov_np_factor, cov_np_factor.transpose()) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np( - X[mask_indices, :], cov_np) - - assert_allclose(logdet_np, cov.logdet.eval( - session=sess), rtol=rtol, atol=atol) - assert_allclose( - sinv_np, - cov.solve(eye).eval(session=sess)[ - np.ix_(mask_indices, mask_indices)], - rtol=rtol, - atol=atol, - ) - assert_allclose( - sinvx_np, - cov.solve(X_tf).eval(session=sess)[mask_indices, :], - rtol=rtol, - atol=atol, - ) + L1 = (cov.L[0]).numpy() + L2 = (cov.L[1]).numpy() + L3 = (cov.L[2]).numpy() + cov_np_factor = np.kron(L1, np.kron(L2, L3))[ + np.ix_(mask_indices, mask_indices)] + cov_np = np.dot(cov_np_factor, cov_np_factor.transpose()) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np( + X[mask_indices, :], cov_np) + + assert_allclose(logdet_np, cov.logdet, rtol=rtol, atol=atol) + assert_allclose( + sinv_np, + cov.solve(eye).numpy()[ + np.ix_(mask_indices, mask_indices)], + rtol=rtol, + atol=atol, + ) + assert_allclose( + sinvx_np, + cov.solve(X_tf).numpy()[mask_indices, :], + rtol=rtol, + atol=atol, + ) def test_CovAR1(): cov = CovAR1(size=m) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - cov_np = np.linalg.inv(cov.solve(eye).eval(session=sess)) + cov_np = np.linalg.inv(cov.solve(eye)) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + logdet_np, sinv_np, sinvx_np 
= logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) def test_CovAR1_scan_onsets(): cov = CovAR1(size=m, scan_onsets=[0, m // 2]) - with tf.compat.v1.Session() as sess: - # initialize the random covariance - sess.run(tf.compat.v1.variables_initializer(cov.get_optimize_vars())) - # compute the naive version - cov_np = np.linalg.inv(cov.solve(eye).eval(session=sess)) + # compute the naive version + cov_np = np.linalg.inv(cov.solve(eye)) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) - assert_allclose(logdet_np, cov.logdet.eval(session=sess), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf).eval(session=sess), rtol=rtol) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) + assert_allclose(logdet_np, cov.logdet, rtol=rtol) + assert_allclose(sinvx_np, cov.solve( + X_tf), rtol=rtol) From b0eb0c64acc35e939ab18d7a9f78889517a84b1c Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 27 Jul 2020 15:48:06 -0700 Subject: [PATCH 33/84] matnorm test passes in eager --- tests/matnormal/test_matnormal_logp.py | 47 +++++++++++--------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index dd2ad46b1..304823f17 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -2,7 +2,6 @@ from numpy.testing import assert_allclose from scipy.stats import multivariate_normal import tensorflow as tf -tf.compat.v1.disable_eager_execution() from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import matnorm_logp @@ -22,38 +21,30 @@ def test_against_scipy_mvn_row(): - with tf.compat.v1.Session() as sess: + rowcov = CovUnconstrainedCholesky(size=m) + colcov = CovIdentity(size=n) + X = rmn(np.eye(m), np.eye(n)) + X_tf = tf.constant(X, "float64") - rowcov = CovUnconstrainedCholesky(size=m) - colcov = CovIdentity(size=n) - X = rmn(np.eye(m), np.eye(n)) - X_tf = tf.constant(X, "float64") + rowcov_np = rowcov._cov - sess.run(tf.compat.v1.global_variables_initializer()) - - rowcov_np = rowcov._cov.eval(session=sess) - - scipy_answer = np.sum(multivariate_normal.logpdf( - X.T, np.zeros([m]), rowcov_np)) - tf_answer = matnorm_logp(X_tf, rowcov, colcov) - assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) + tf_answer = matnorm_logp(X_tf, rowcov, colcov) + assert_allclose(scipy_answer, tf_answer, rtol=rtol) def test_against_scipy_mvn_col(): - with tf.compat.v1.Session() as sess: - - rowcov = CovIdentity(size=m) - colcov = CovUnconstrainedCholesky(size=n) - X = rmn(np.eye(m), np.eye(n)) - X_tf = tf.constant(X, "float64") - - sess.run(tf.compat.v1.global_variables_initializer()) + rowcov = CovIdentity(size=m) + colcov = CovUnconstrainedCholesky(size=n) + X = rmn(np.eye(m), np.eye(n)) + X_tf = tf.constant(X, "float64") - colcov_np = colcov._cov.eval(session=sess) + colcov_np = colcov._cov - scipy_answer = np.sum(multivariate_normal.logpdf(X, - np.zeros([n]), - colcov_np)) - tf_answer = matnorm_logp(X_tf, rowcov, colcov) - assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + scipy_answer = np.sum(multivariate_normal.logpdf(X, + np.zeros([n]), + colcov_np)) + tf_answer = matnorm_logp(X_tf, rowcov, colcov) + assert_allclose(scipy_answer, tf_answer, rtol=rtol) From 0ba7509dd6df05b9a922f5cb2c14dcd5dc425e0d Mon Sep 17 
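A quick standalone check of the identity these tests rely on: when the column covariance is the identity, vec(X) ~ N(0, I_n kron R), so the matrix-normal log density is the sum of independent N(0, R) log densities over the columns of X. A minimal NumPy/SciPy sketch (illustrative only; sizes and the covariance are arbitrary):

import numpy as np
from scipy.stats import multivariate_normal

m, n = 5, 4
R = np.cov(np.random.randn(m, 3 * m))               # arbitrary m x m row covariance
X = np.linalg.cholesky(R) @ np.random.randn(m, n)   # X ~ MN(0, R, I_n)

# matrix-normal log density with col_cov = I_n, written out directly
logp_mn = (-0.5 * m * n * np.log(2 * np.pi)
           - 0.5 * n * np.linalg.slogdet(R)[1]
           - 0.5 * np.trace(X.T @ np.linalg.solve(R, X)))

# sum of per-column MVN log densities -- should agree, as in the tests
logp_cols = multivariate_normal.logpdf(X.T, np.zeros(m), R).sum()
assert np.allclose(logp_mn, logp_cols)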
00:00:00 2001 From: Michael Shvartsman Date: Mon, 27 Jul 2020 15:50:30 -0700 Subject: [PATCH 34/84] rest of likelihoods pass in eager --- .../test_matnormal_logp_conditional.py | 35 ++++++++----------- .../matnormal/test_matnormal_logp_marginal.py | 33 +++++++---------- 2 files changed, 27 insertions(+), 41 deletions(-) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 977136030..05b48255b 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -2,7 +2,6 @@ from numpy.testing import assert_allclose from scipy.stats import wishart, multivariate_normal import tensorflow as tf -tf.compat.v1.disable_eager_execution() from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import ( @@ -42,20 +41,17 @@ def test_against_scipy_mvn_row_conditional(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) + Q_np = Q._cov - Q_np = Q._cov.eval(session=sess) + rowcov_np = rowcov._cov -\ + A.dot(np.linalg.inv(Q_np)).dot((A.T)) - rowcov_np = rowcov._cov.eval(session=sess) -\ - A.dot(np.linalg.inv(Q_np)).dot((A.T)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) - scipy_answer = np.sum(multivariate_normal.logpdf( - X.T, np.zeros([m]), rowcov_np)) - - tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q) - assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q) + assert_allclose(scipy_answer, tf_answer, rtol=rtol) def test_against_scipy_mvn_col_conditional(): @@ -75,18 +71,15 @@ def test_against_scipy_mvn_col_conditional(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.compat.v1.Session() as sess: - - sess.run(tf.compat.v1.global_variables_initializer()) - Q_np = Q._cov.eval(session=sess) + Q_np = Q._cov - colcov_np = colcov._cov.eval(session=sess) -\ - A.T.dot(np.linalg.inv(Q_np)).dot((A)) + colcov_np = colcov._cov -\ + A.T.dot(np.linalg.inv(Q_np)).dot((A)) - scipy_answer = np.sum(multivariate_normal.logpdf( - X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) - tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q) + tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q) - assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + assert_allclose(scipy_answer, tf_answer, rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index 532081efa..53649c8e2 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -2,7 +2,6 @@ from numpy.testing import assert_allclose from scipy.stats import multivariate_normal import tensorflow as tf -tf.compat.v1.disable_eager_execution() from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import ( @@ -36,20 +35,17 @@ def test_against_scipy_mvn_row_marginal(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) + Q_np = Q._cov - Q_np = Q._cov.eval(session=sess) + rowcov_np = rowcov._cov + A.dot(Q_np).dot(A.T) - rowcov_np = rowcov._cov.eval(session=sess) + 
A.dot(Q_np).dot(A.T) + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, + np.zeros([m]), + rowcov_np)) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, - np.zeros([m]), - rowcov_np)) - - tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, A_tf, Q) - assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, A_tf, Q) + assert_allclose(scipy_answer, tf_answer, rtol=rtol) def test_against_scipy_mvn_col_marginal(): @@ -64,16 +60,13 @@ def test_against_scipy_mvn_col_marginal(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - with tf.compat.v1.Session() as sess: - - sess.run(tf.compat.v1.global_variables_initializer()) - Q_np = Q._cov.eval(session=sess) + Q_np = Q._cov - colcov_np = colcov._cov.eval(session=sess) + A.T.dot(Q_np).dot(A) + colcov_np = colcov._cov + A.T.dot(Q_np).dot(A) - scipy_answer = np.sum(multivariate_normal.logpdf( - X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) - tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, A_tf, Q) - assert_allclose(scipy_answer, tf_answer.eval(session=sess), rtol=rtol) + tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, A_tf, Q) + assert_allclose(scipy_answer, tf_answer, rtol=rtol) From 1ec442517156e5ac4729b3a9c6f44ec9bc69ffaa Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 15:42:52 -0700 Subject: [PATCH 35/84] pack/unpack for fitting using scipy --- brainiak/matnormal/regression.py | 58 ++++++++++++-------------------- brainiak/matnormal/utils.py | 46 +++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 36 deletions(-) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 3fd9bb6ac..f82c484e9 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -1,8 +1,14 @@ import tensorflow as tf +import tensorflow_probability as tfp import numpy as np from sklearn.base import BaseEstimator from brainiak.matnormal.matnormal_likelihoods import matnorm_logp -from tensorflow.contrib.opt import ScipyOptimizerInterface +from brainiak.matnormal.utils import ( + pack_trainable_vars, + unpack_trainable_vars, + make_val_and_grad, +) +from scipy.optimize import minimize __all__ = ["MatnormalRegression"] @@ -28,8 +34,7 @@ class MatnormalRegression(BaseEstimator): """ - def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", - optCtrl=None): + def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", optCtrl=None): self.optCtrl, self.optMethod = optCtrl, optimizer self.time_cov = time_cov @@ -38,19 +43,11 @@ def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", self.n_t = time_cov.size self.n_v = space_cov.size - self.Y = tf.compat.v1.placeholder(tf.float64, [self.n_t, self.n_v], name="Y") - - self.X = tf.compat.v1.placeholder(tf.float64, [self.n_t, None], name="X") - - # create a tf session we reuse for this object - self.sess = tf.compat.v1.Session() - - # @define_scope - def logp(self): + def logp(self, X, Y): """ Log likelihood of model (internal) """ - y_hat = tf.matmul(self.X, self.beta) - resid = self.Y - y_hat + y_hat = tf.matmul(X, self.beta) + resid = Y - y_hat return matnorm_logp(resid, self.time_cov, self.space_cov) def fit(self, X, y): @@ -66,20 +63,12 @@ def fit(self, X, y): self.n_c = X.shape[1] - feed_dict = {self.X: X, self.Y: y} - self.sess.run(tf.compat.v1.global_variables_initializer(), feed_dict=feed_dict) - # initialize to the least squares 
solution (basically all # we need now is the cov) - sigma_inv_x = self.time_cov.solve(self.X).eval( - session=self.sess, feed_dict=feed_dict - ) - sigma_inv_y = self.time_cov.solve(self.Y).eval( - session=self.sess, feed_dict=feed_dict - ) + sigma_inv_x = self.time_cov.solve(X) + sigma_inv_y = self.time_cov.solve(y) - beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), - (X.T).dot(sigma_inv_y)) + beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) self.beta = tf.Variable(beta_init, name="beta") @@ -87,18 +76,16 @@ def fit(self, X, y): self.train_variables.extend(self.time_cov.get_optimize_vars()) self.train_variables.extend(self.space_cov.get_optimize_vars()) - self.sess.run(tf.compat.v1.variables_initializer([self.beta])) + val_and_grad = make_val_and_grad(self, extra_args=(X, y)) + x0 = pack_trainable_vars(self.train_variables) - optimizer = ScipyOptimizerInterface( - -self.logp(), - var_list=self.train_variables, - method=self.optMethod, - options=self.optCtrl, + opt_results = minimize( + fun=val_and_grad, x0=x0, args=(X, y), jac=True, method="L-BFGS-B" ) - optimizer.minimize(session=self.sess, feed_dict=feed_dict) - - self.beta_ = self.beta.eval(session=self.sess) + unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) + for var, val in zip(self.train_variables, unpacked_theta): + var = val def predict(self, X): """ Predict fMRI signal from design matrix. @@ -136,8 +123,7 @@ def calibrate(self, Y): # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) # (B Sigma_s^{-1} B')^{-1} - B_Sigma_Btrp = tf.matmul( - self.beta, Sigma_s_btrp).eval(session=self.sess) + B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp).eval(session=self.sess) X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 30f8f18b4..d9b144d60 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -1,6 +1,7 @@ import tensorflow as tf from scipy.stats import norm from numpy.linalg import cholesky +import numpy as np def rmn(rowcov, colcov): @@ -32,3 +33,48 @@ def scaled_I(x, size): def quad_form_trp(x, y): """ x * y * x' """ return tf.matmul(x, tf.matmul(y, x, transpose_b=True)) + + +def pack_trainable_vars(trainable_vars): + """ + Pack trainable vars in a model into a single + vector that can be passed to scipy.optimize + """ + return tf.concat([tf.reshape(tv, (-1,)) for tv in trainable_vars], axis=0) + + +def unpack_trainable_vars(x, trainable_vars): + """ + Unpack trainable vars from a single vector as + used/returned by scipy.optimize + """ + + sizes = [tv.shape for tv in trainable_vars] + idxs = [np.prod(sz) for sz in sizes] + flatvars = tf.split(x, idxs) + return [tf.reshape(fv, tv.shape) for fv, tv in zip(flatvars, trainable_vars)] + + +def make_val_and_grad(model, lossfn=None, extra_args=None, train_vars=None): + + if train_vars is None: + train_vars = model.train_variables + + if lossfn is None: + lossfn = lambda theta: -model.logp(*extra_args) + + if extra_args is None: + extra_args = {} + + def val_and_grad(theta, *extra_args): + with tf.GradientTape() as tape: + tape.watch(train_vars) + unpacked_theta = unpack_trainable_vars(theta, train_vars) + for var, val in zip(train_vars, unpacked_theta): + var = val + loss = lossfn(theta) + grad = tape.gradient(loss, train_vars) + packed_grad = pack_trainable_vars(grad) + return loss.numpy(), packed_grad.numpy() + + return val_and_grad From d032b1aeb007ba3578402f539752c948d262087e Mon Sep 17 
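The pack/unpack helpers above exist so that TensorFlow 2 variables can be fit with scipy's L-BFGS-B: flatten all trainable variables into one vector for scipy, and have the objective scatter that vector back into the variables before computing the loss and gradient under a GradientTape. A minimal self-contained sketch of the pattern (the variable and function names here are illustrative, not the package API):

import numpy as np
import tensorflow as tf
from scipy.optimize import minimize

# two toy trainable variables standing in for a model's parameters
a = tf.Variable(tf.random.normal([3], dtype=tf.float64))
b = tf.Variable(tf.random.normal([2, 2], dtype=tf.float64))
train_vars = [a, b]

def loss_fn():
    # toy quadratic loss; a real model would return -logp here
    return tf.reduce_sum(a ** 2) + tf.reduce_sum((b - tf.eye(2, dtype=tf.float64)) ** 2)

def val_and_grad(theta):
    # scatter the flat scipy vector back into the variables
    sizes = [int(np.prod(v.shape)) for v in train_vars]
    for v, chunk in zip(train_vars, np.split(theta, np.cumsum(sizes)[:-1])):
        v.assign(chunk.reshape(tuple(v.shape)))
    with tf.GradientTape() as tape:
        loss = loss_fn()
    grads = tape.gradient(loss, train_vars)
    return loss.numpy(), np.concatenate([g.numpy().ravel() for g in grads])

x0 = np.concatenate([v.numpy().ravel() for v in train_vars])
res = minimize(val_and_grad, x0, jac=True, method="L-BFGS-B")

This is the same round trip that pack_trainable_vars / unpack_trainable_vars support for a model's train_variables; writing the values back into the variables is the step that lets scipy drive the TensorFlow computation.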
00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:04:23 -0700 Subject: [PATCH 36/84] do cholesky covs better now --- brainiak/matnormal/covs.py | 155 +++++++++++++++++++----------------- tests/matnormal/test_cov.py | 64 +++++---------- 2 files changed, 102 insertions(+), 117 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 9b28d10f3..85abd5f76 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -141,11 +141,9 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): if scan_onsets is None: self.run_sizes = [size] self.offdiag_template = tf.constant( - scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)]), - dtype=tf.float64 + scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)]), dtype=tf.float64 ) - self.diag_template = tf.constant(np.diag( - np.r_[0, np.ones(size - 2), 0])) + self.diag_template = tf.constant(np.diag(np.r_[0, np.ones(size - 2), 0])) else: self.run_sizes = np.ediff1d(np.r_[scan_onsets, size]) sub_offdiags = [ @@ -155,8 +153,7 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): self.offdiag_template = tf.constant( scipy.sparse.block_diag(sub_offdiags).toarray() ) - subdiags = [np.diag(np.r_[0, np.ones(r - 2), 0]) - for r in self.run_sizes] + subdiags = [np.diag(np.r_[0, np.ones(r - 2), 0]) for r in self.run_sizes] self.diag_template = tf.constant( scipy.sparse.block_diag(subdiags).toarray() ) @@ -175,8 +172,7 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): tf.random.normal([1], dtype=tf.float64), name="rho" ) else: - self.rho_unc = tf.Variable( - 2 * tf.sigmoid(self.rho_unc) - 1, name="rho") + self.rho_unc = tf.Variable(2 * tf.sigmoid(self.rho_unc) - 1, name="rho") @property def logdet(self): @@ -187,8 +183,9 @@ def logdet(self): sigma = tf.exp(self.log_sigma) # now compute logdet return tf.reduce_sum( - input_tensor=2 * tf.constant(self.run_sizes, dtype=tf.float64) * - tf.math.log(sigma) + input_tensor=2 + * tf.constant(self.run_sizes, dtype=tf.float64) + * tf.math.log(sigma) - tf.math.log(1 - tf.square(rho)) ) @@ -204,8 +201,11 @@ def _prec(self): rho = 2 * tf.sigmoid(self.rho_unc) - 1 sigma = tf.exp(self.log_sigma) - return (self._identity_mat - rho * self.offdiag_template + - rho ** 2 * self.diag_template) / tf.square(sigma) + return ( + self._identity_mat + - rho * self.offdiag_template + + rho ** 2 * self.diag_template + ) / tf.square(sigma) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to @@ -283,8 +283,7 @@ def __init__(self, size, diag_var=None): tf.random.normal([size], dtype=tf.float64), name="precisions" ) else: - self.logprec = tf.Variable( - np.log(1 / diag_var), name="log-precisions") + self.logprec = tf.Variable(np.log(1 / diag_var), name="log-precisions") self.prec = tf.exp(self.logprec) self.prec_dimaugmented = tf.expand_dims(self.prec, -1) @@ -333,7 +332,7 @@ class CovUnconstrainedCholesky(CovBase): def __init__(self, size=None, Sigma=None): if size is None and Sigma is None: - raise RuntimeError("Must pass either Sigma or size") + raise RuntimeError("Must pass either Sigma or size but not both") if size is not None and Sigma is not None: raise RuntimeError("Must pass either Sigma or size but not both") @@ -343,45 +342,37 @@ def __init__(self, size=None, Sigma=None): super(CovUnconstrainedCholesky, self).__init__(size) + # number of parameters in the triangular mat + npar = (size * (size + 1)) // 2 + if Sigma is None: - self.L_full = tf.Variable( - tf.random.normal([size, size], 
dtype=tf.float64), - name="L_full", - dtype="float64", + self.L_flat = tf.Variable( + tf.random.normal([npar], dtype=tf.float64), name="L_flat" ) else: - # in order to respect the Sigma we got passed in, we log the diag - # which we will later exp. a little ugly but this - # is a rare use case L = np.linalg.cholesky(Sigma) + # log diag since we exp it later for unique + # parameterization L[np.diag_indices_from(L)] = np.log(np.diag(L)) - self.L_full = tf.Variable(L, name="L_full", dtype="float64") - - # Zero out triu of L_full to get cholesky L. - # This seems dumb but TF is smart enough to set the gradient to zero - # for those elements, and the alternative (fill_lower_triangular from - # contrib.distributions) is inefficient and recommends not doing the - # packing (for now). - # Also: to make the parameterization unique we exp the diagonal so - # it's positive. - - L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) - self.L = tf.linalg.set_diag( - L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) - ) + self.L_flat = tfp.math.fill_triangular_inverse(L) @property - def logdet(self): + def L(self): + L = tfp.math.fill_triangular(self.L_flat) + # exp diag for unique parameterization + L = tf.linalg.set_diag(L, tf.exp(tf.linalg.diag_part(L))) + return L - # We save a log here by using the diag of L_full - return 2 * tf.reduce_sum(input_tensor=(tf.linalg.diag_part(self.L_full))) + @property + def logdet(self): + return 2 * tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.diag_part(self.L))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance """ - return [self.L_full] + return [self.L_flat] def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` @@ -427,44 +418,56 @@ class CovUnconstrainedInvCholesky(CovBase): this saves a cholesky solve on every step of optimization """ - def __init__(self, size, invSigma=None): + def __init__(self, size=None, invSigma=None): + + if size is None and invSigma is None: + raise RuntimeError("Must pass either invSigma or size but not both") + + if size is not None and invSigma is not None: + raise RuntimeError("Must pass either invSigma or size but not both") + + if invSigma is not None: + size = invSigma.shape[0] + super(CovUnconstrainedInvCholesky, self).__init__(size) + # number of parameters in the triangular mat + npar = (size * (size + 1)) // 2 + if invSigma is None: - self.Linv_full = tf.Variable( - tf.random.normal([size, size], dtype=tf.float64), - name="Linv_full") + self.Linv_flat = tf.Variable( + tf.random.normal([npar], dtype=tf.float64), name="Linv_flat" + ) + else: - self.Linv_full = tf.Variable( - np.linalg.cholesky(invSigma), name="Linv_full") - - # Zero out triu of L_full to get cholesky L. - # This seems dumb but TF is smart enough to set the gradient to zero - # for those elements, and the alternative (fill_lower_triangular from - # contrib.distributions) is inefficient and recommends not doing the - # packing (for now). - # Also: to make the parameterization unique we log the diagonal so - # it's positive. 
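# --- Illustrative sketch (not part of the patch): the flat Cholesky
# parameterization these covariance classes now share. A length n*(n+1)/2
# vector is unpacked into a lower-triangular factor whose diagonal is
# exponentiated, so any parameter vector maps to a valid positive-definite
# covariance.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

size = 4
npar = (size * (size + 1)) // 2                  # free parameters in the factor
theta = tf.random.normal([npar], dtype=tf.float64)

L = tfp.math.fill_triangular(theta)              # flat vector -> lower triangle
L = tf.linalg.set_diag(L, tf.exp(tf.linalg.diag_part(L)))  # positive diagonal
Sigma = tf.matmul(L, L, transpose_b=True)        # Sigma = L L'

assert np.all(np.linalg.eigvalsh(Sigma.numpy()) > 0)  # symmetric positive definite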
- L_indeterminate = tf.linalg.band_part(self.Linv_full, -1, 0) - self.Linv = tf.linalg.set_diag( - L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) - ) + Linv = np.linalg.cholesky(invSigma) + # log diag since we exp it later for unique + # parameterization + Linv[np.diag_indices_from(Linv)] = np.log(np.diag(Linv)) + self.Linv_flat = tfp.math.fill_triangular_inverse(Linv) + + @property + def Linv(self): + Linv = tfp.math.fill_triangular(self.Linv_flat) + # exp diag for unique parameterization + Linv = tf.linalg.set_diag(Linv, tf.exp(tf.linalg.diag_part(Linv))) + return Linv @property def logdet(self): - return -2 * tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.diag_part(self.Linv))) + return -2 * tf.reduce_sum( + input_tensor=tf.math.log(tf.linalg.diag_part(self.Linv)) + ) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit - this covariance + this covariance """ - return [self.Linv_full] + return [self.Linv_flat] def solve(self, X): - """ - Given this Sigma and some X, compute :math:`Sigma^{-1} * x` using - matmul (since we're parameterized by L_inv) - + """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + using cholesky solve Parameters ---------- X: tf.Tensor @@ -521,15 +524,13 @@ def __init__(self, sizes, Sigmas=None, mask=None): ] else: self.L_full = [ - tf.Variable(np.linalg.cholesky( - Sigmas[i]), name="L" + str(i) + "_full") + tf.Variable(np.linalg.cholesky(Sigmas[i]), name="L" + str(i) + "_full") for i in range(self.nfactors) ] self.mask = mask # make a list of choleskys - L_indeterminate = [tf.linalg.band_part( - mat, -1, 0) for mat in self.L_full] + L_indeterminate = [tf.linalg.band_part(mat, -1, 0) for mat in self.L_full] self.L = [ tf.linalg.set_diag(mat, tf.exp(tf.linalg.diag_part(mat))) for mat in L_indeterminate @@ -546,12 +547,17 @@ def logdet(self): """ log|Sigma| using the diagonals of the cholesky factors. 
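# --- Illustrative sketch (not part of the patch): the unmasked branch of this
# logdet relies on the Kronecker determinant identity
#   log|A (x) B| = n_B * log|A| + n_A * log|B|,
# generalized to several factors; the masked branch adjusts it for excluded
# entries. A quick numerical check of the two-factor case:
import numpy as np
from scipy.stats import wishart

A = wishart.rvs(df=5, scale=np.eye(3))           # 3 x 3 SPD factor
B = wishart.rvs(df=6, scale=np.eye(4))           # 4 x 4 SPD factor

lhs = np.linalg.slogdet(np.kron(A, B))[1]
rhs = B.shape[0] * np.linalg.slogdet(A)[1] + A.shape[0] * np.linalg.slogdet(B)[1]
np.testing.assert_allclose(lhs, rhs)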
""" if self.mask is None: - n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) - for mat in self.L]) + n_list = tf.stack( + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in self.L] + ) n_prod = tf.reduce_prod(input_tensor=n_list) logdet = tf.stack( - [tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.tensor_diag_part(mat))) - for mat in self.L] + [ + tf.reduce_sum( + input_tensor=tf.math.log(tf.linalg.tensor_diag_part(mat)) + ) + for mat in self.L + ] ) logdetfinal = tf.reduce_sum(input_tensor=(logdet * n_prod) / n_list) else: @@ -562,7 +568,8 @@ def logdet(self): indices = list(range(self.nfactors)) indices.remove(i) logdet += tf.math.log(tf.linalg.tensor_diag_part(self.L[i])) * tf.cast( - tf.reduce_sum(input_tensor=mask_reshaped, axis=indices), dtype=tf.float64 + tf.reduce_sum(input_tensor=mask_reshaped, axis=indices), + dtype=tf.float64, ) logdetfinal = tf.reduce_sum(input_tensor=logdet) return 2.0 * logdetfinal diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 6782998b7..f453fabf5 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -19,7 +19,6 @@ import logging - logging.basicConfig(level=logging.DEBUG) # X is m x n, so A sould be m x p @@ -80,8 +79,7 @@ def test_CovConstant(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovIdentity(): @@ -93,8 +91,7 @@ def test_CovIdentity(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovIsotropic(): @@ -106,8 +103,7 @@ def test_CovIsotropic(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovDiagonal(): @@ -119,8 +115,7 @@ def test_CovDiagonal(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovDiagonal_initialized(): @@ -132,15 +127,13 @@ def test_CovDiagonal_initialized(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovDiagonalGammaPrior(): cov_np = np.diag(np.exp(np.random.normal(size=m))) - cov = CovDiagonalGammaPrior( - size=m, sigma=np.diag(cov_np), alpha=1.5, beta=1e-10) + cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, beta=1e-10) ig = invgamma(1.5, scale=1e-10) @@ -149,8 +142,7 @@ def test_CovDiagonalGammaPrior(): penalty_np = np.sum(ig.logpdf(1 / np.diag(cov_np))) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), 
rtol=rtol) assert_allclose(penalty_np, cov.logp, rtol=rtol) @@ -163,8 +155,7 @@ def test_CovUnconstrainedCholesky(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovUnconstrainedCholeskyWishartReg(): @@ -177,8 +168,7 @@ def test_CovUnconstrainedCholeskyWishartReg(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) # now compute the regularizer reg = wishart.logpdf(cov_np, df=m + 2, scale=1e10 * np.eye(m)) assert_allclose(reg, cov.logp, rtol=rtol) @@ -187,7 +177,7 @@ def test_CovUnconstrainedCholeskyWishartReg(): def test_CovUnconstrainedInvCholesky(): init = invwishart.rvs(scale=np.eye(m), df=m + 2) - cov = CovUnconstrainedInvCholesky(size=m, invSigma=init) + cov = CovUnconstrainedInvCholesky(invSigma=init) Linv = cov.Linv L = np.linalg.inv(Linv) @@ -196,8 +186,7 @@ def test_CovUnconstrainedInvCholesky(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_Cov2FactorKron(): @@ -213,14 +202,12 @@ def test_Cov2FactorKron(): L1 = (cov.L[0]).numpy() L2 = (cov.L[1]).numpy() - cov_np = np.kron(np.dot(L1, L1.transpose()), - np.dot(L2, L2.transpose())) + cov_np = np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_Cov3FactorKron(): @@ -230,7 +217,6 @@ def test_Cov3FactorKron(): dim3 = 2 cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3]) - L1 = (cov.L[0]).numpy() L2 = (cov.L[1]).numpy() L3 = (cov.L[2]).numpy() @@ -242,8 +228,7 @@ def test_Cov3FactorKron(): assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_Cov3FactorMaskedKron(): @@ -263,25 +248,19 @@ def test_Cov3FactorMaskedKron(): L1 = (cov.L[0]).numpy() L2 = (cov.L[1]).numpy() L3 = (cov.L[2]).numpy() - cov_np_factor = np.kron(L1, np.kron(L2, L3))[ - np.ix_(mask_indices, mask_indices)] + cov_np_factor = np.kron(L1, np.kron(L2, L3))[np.ix_(mask_indices, mask_indices)] cov_np = np.dot(cov_np_factor, cov_np_factor.transpose()) - logdet_np, sinv_np, sinvx_np = logdet_sinv_np( - X[mask_indices, :], cov_np) + logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices, :], cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol, atol=atol) assert_allclose( sinv_np, - cov.solve(eye).numpy()[ - np.ix_(mask_indices, mask_indices)], + cov.solve(eye).numpy()[np.ix_(mask_indices, mask_indices)], rtol=rtol, atol=atol, ) assert_allclose( - sinvx_np, - cov.solve(X_tf).numpy()[mask_indices, :], - rtol=rtol, - atol=atol, + sinvx_np, cov.solve(X_tf).numpy()[mask_indices, :], rtol=rtol, atol=atol ) @@ -293,8 +272,7 @@ 
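# --- Note: every covariance test compares the TensorFlow implementation
# against a plain NumPy reference. The helper logdet_sinv_np is defined near
# the top of tests/matnormal/test_cov.py (outside these hunks); a hypothetical
# sketch of what such a reference computes:
import numpy as np

def logdet_sinv_np_sketch(X, cov):
    """NumPy reference: log|cov|, cov^{-1}, and cov^{-1} X."""
    _, logdet = np.linalg.slogdet(cov)
    sinv = np.linalg.inv(cov)
    sinvx = np.linalg.solve(cov, X)
    return logdet, sinv, sinvx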
def test_CovAR1(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) def test_CovAR1_scan_onsets(): @@ -306,5 +284,5 @@ def test_CovAR1_scan_onsets(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) - assert_allclose(sinvx_np, cov.solve( - X_tf), rtol=rtol) + assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) + From ed63f6d147d4bf7cbe6888b23d60b6874fa4905d Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:04:37 -0700 Subject: [PATCH 37/84] test for new packing/unpacking utils --- tests/matnormal/test_matnormal_utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 tests/matnormal/test_matnormal_utils.py diff --git a/tests/matnormal/test_matnormal_utils.py b/tests/matnormal/test_matnormal_utils.py new file mode 100644 index 000000000..1a571139a --- /dev/null +++ b/tests/matnormal/test_matnormal_utils.py @@ -0,0 +1,15 @@ +from brainiak.matnormal.utils import pack_trainable_vars, unpack_trainable_vars +import tensorflow as tf + + +def test_pack_unpack(): + + shapes = [[2, 3], [3], [3, 4, 2], [1, 5]] + mats = [tf.random.stateless_normal( + shape=shape, seed=[0, 0]) for shape in shapes] + flatmats = pack_trainable_vars(mats) + unflatmats = unpack_trainable_vars(flatmats, mats) + with tf.compat.v1.Session() as sess: + for mat_in, mat_out in zip(mats, unflatmats): + assert tf.math.reduce_all( + tf.equal(mat_in, mat_out)).eval(session=sess) From 1db00800f4cbcae18ca8a781a6fce3901bbc1c09 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:04:51 -0700 Subject: [PATCH 38/84] tests now pass eager mode --- tests/matnormal/test_matnormal_regression.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 975e53e53..806aa9a91 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -1,5 +1,7 @@ import numpy as np from scipy.stats import norm, wishart, pearsonr +import tensorflow as tf + from brainiak.matnormal.covs import ( CovIdentity, CovUnconstrainedCholesky, @@ -28,17 +30,17 @@ def test_matnorm_regression_unconstrained(): Y_hat = X.dot(B) rowcov_true = np.eye(m) colcov_true = wishart.rvs(p + 2, np.eye(p)) - - Y = Y_hat + rmn(rowcov_true, colcov_true) + + y = Y_hat + rmn(rowcov_true, colcov_true) row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedCholesky(size=p) model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) - model.fit(X, Y) + model.fit(X, y) - assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol def test_matnorm_regression_unconstrainedprec(): @@ -60,7 +62,7 @@ def test_matnorm_regression_unconstrainedprec(): model.fit(X, Y) - assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol def test_matnorm_regression_optimizerChoice(): @@ -83,7 +85,7 @@ def test_matnorm_regression_optimizerChoice(): model.fit(X, Y) - assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol def test_matnorm_regression_scaledDiag(): @@ -106,4 +108,4 @@ def 
test_matnorm_regression_scaledDiag(): model.fit(X, Y) - assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol From b134a0530dd2fc2398208a2a2e21974aca07e7ce Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:29:59 -0700 Subject: [PATCH 39/84] pull out repeated code as a util --- brainiak/matnormal/covs.py | 27 ++++++++++----------------- brainiak/matnormal/utils.py | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 85abd5f76..18609cb14 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -5,7 +5,12 @@ import scipy.sparse import tensorflow_probability as tfp -from brainiak.matnormal.utils import x_tx, xx_t +from brainiak.matnormal.utils import ( + x_tx, + xx_t, + unflatten_cholesky_unique, + flatten_cholesky_unique, +) from brainiak.utils.kronecker_solvers import ( tf_solve_lower_triangular_kron, tf_solve_upper_triangular_kron, @@ -352,17 +357,11 @@ def __init__(self, size=None, Sigma=None): else: L = np.linalg.cholesky(Sigma) - # log diag since we exp it later for unique - # parameterization - L[np.diag_indices_from(L)] = np.log(np.diag(L)) - self.L_flat = tfp.math.fill_triangular_inverse(L) + self.L_flat = tf.Variable(flatten_cholesky_unique(L), name="L_flat") @property def L(self): - L = tfp.math.fill_triangular(self.L_flat) - # exp diag for unique parameterization - L = tf.linalg.set_diag(L, tf.exp(tf.linalg.diag_part(L))) - return L + return unflatten_cholesky_unique(self.L_flat) @property def logdet(self): @@ -441,17 +440,11 @@ def __init__(self, size=None, invSigma=None): else: Linv = np.linalg.cholesky(invSigma) - # log diag since we exp it later for unique - # parameterization - Linv[np.diag_indices_from(Linv)] = np.log(np.diag(Linv)) - self.Linv_flat = tfp.math.fill_triangular_inverse(Linv) + self.Linv_flat = tf.Variable(flatten_cholesky_unique(Linv), name="Linv_flat") @property def Linv(self): - Linv = tfp.math.fill_triangular(self.Linv_flat) - # exp diag for unique parameterization - Linv = tf.linalg.set_diag(Linv, tf.exp(tf.linalg.diag_part(Linv))) - return Linv + return unflatten_cholesky_unique(self.Linv_flat) @property def logdet(self): diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index d9b144d60..581306290 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -1,4 +1,5 @@ import tensorflow as tf +import tensorflow_probability as tfp from scipy.stats import norm from numpy.linalg import cholesky import numpy as np @@ -35,6 +36,29 @@ def quad_form_trp(x, y): return tf.matmul(x, tf.matmul(y, x, transpose_b=True)) +def flatten_cholesky_unique(L): + """ + Flattens nonzero-elements Cholesky (triangular) factor + into a vector, and logs diagonal to make parameterizaation + unique. Inverse of unflatten_cholesky_unique. + """ + L[np.diag_indices_from(L)] = np.log(np.diag(L)) + L_flat = tfp.math.fill_triangular_inverse(L) + return L_flat + + +def unflatten_cholesky_unique(L_flat): + """ + Converts a vector of elements into a triangular matrix + (Cholesky factor). Exponentiates diagonal to make + parameterizaation unique. Inverse of flatten_cholesky_unique. 
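# --- Usage sketch (not part of the patch) for the two helpers added here;
# they invert one another up to the log/exp applied to the diagonal. Note that
# flatten_cholesky_unique writes the logged diagonal into its argument, so pass
# a copy if the original factor is still needed.
import numpy as np
from scipy.stats import wishart
from brainiak.matnormal.utils import (
    flatten_cholesky_unique, unflatten_cholesky_unique)

Sigma = wishart.rvs(df=6, scale=np.eye(4))
L = np.linalg.cholesky(Sigma)

theta = flatten_cholesky_unique(L.copy())   # flat vector, diagonal on log scale
L_back = unflatten_cholesky_unique(theta)   # triangular factor, diagonal re-exp'd

np.testing.assert_allclose(L, L_back.numpy(), rtol=1e-10)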
+ """ + L = tfp.math.fill_triangular(L_flat) + # exp diag for unique parameterization + L = tf.linalg.set_diag(L, tf.exp(tf.linalg.diag_part(L))) + return L + + def pack_trainable_vars(trainable_vars): """ Pack trainable vars in a model into a single From 7ffbe912faf42aed28b7bbd79cefa8fd66d0386a Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:30:22 -0700 Subject: [PATCH 40/84] Follow more standard sklearn API where est params come out as trailing underscore --- brainiak/matnormal/regression.py | 2 ++ tests/matnormal/test_matnormal_regression.py | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index f82c484e9..81f7e24f7 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -87,6 +87,8 @@ def fit(self, X, y): for var, val in zip(self.train_variables, unpacked_theta): var = val + self.beta_ = self.beta.numpy() + def predict(self, X): """ Predict fMRI signal from design matrix. diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 806aa9a91..f4338f7b2 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -40,7 +40,7 @@ def test_matnorm_regression_unconstrained(): model.fit(X, y) - assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol def test_matnorm_regression_unconstrainedprec(): @@ -62,7 +62,7 @@ def test_matnorm_regression_unconstrainedprec(): model.fit(X, Y) - assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol def test_matnorm_regression_optimizerChoice(): @@ -85,7 +85,7 @@ def test_matnorm_regression_optimizerChoice(): model.fit(X, Y) - assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol def test_matnorm_regression_scaledDiag(): @@ -108,4 +108,4 @@ def test_matnorm_regression_scaledDiag(): model.fit(X, Y) - assert pearsonr(B.flatten(), model.beta.numpy().flatten())[0] >= corrtol + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol From 9a6b4b7a80dae508e696e774ac26174c9dd94fde Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:30:32 -0700 Subject: [PATCH 41/84] mnrsa now works with eager, tests pass --- brainiak/matnormal/mnrsa.py | 100 ++++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 45 deletions(-) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 07bea456f..c7fc36243 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -5,8 +5,16 @@ from brainiak.utils.utils import cov2corr import numpy as np from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row -from tensorflow.contrib.opt import ScipyOptimizerInterface +from brainiak.matnormal.utils import ( + pack_trainable_vars, + unpack_trainable_vars, + make_val_and_grad, + unflatten_cholesky_unique, + flatten_cholesky_unique, +) + import tensorflow.compat.v1.logging as tflog +from scipy.optimize import minimize __all__ = ["MNRSA"] @@ -53,8 +61,9 @@ class MNRSA(BaseEstimator): """ - def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", - optCtrl=None): + def __init__( + self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", optCtrl=None + ): self.n_T = 
time_cov.size self.n_V = space_cov.size @@ -62,10 +71,6 @@ def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", self.optCtrl, self.optMethod = optCtrl, optimizer - # placeholders for inputs - self.X = tf.compat.v1.placeholder(tf.float64, [self.n_T, None], name="Design") - self.Y = tf.compat.v1.placeholder(tf.float64, [self.n_T, self.n_V], name="Brain") - self.X_0 = tf.Variable( tf.random.normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0" ) @@ -81,6 +86,10 @@ def __init__(self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", # create a tf session we reuse for this object self.sess = tf.compat.v1.Session() + @property + def L(self): + return unflatten_cholesky_unique(self.L_flat) + def fit(self, X, y, structured_RSA_cov=None): """ Estimate dimension reduction and cognitive model parameters @@ -101,69 +110,70 @@ def fit(self, X, y, structured_RSA_cov=None): """ - # self.sess.run(tf.global_variables_initializer()) - - feed_dict = {self.X: y, self.Y: X} + # In the method signature we follow sklearn discriminative API + # where brain is X and behavior is y. Internally we are + # generative so we flip this here + X, Y = y, X - self.n_c = y.shape[1] + self.n_c = X.shape[1] # initialize from naive RSA m = LinearRegression(fit_intercept=False) - # counterintuitive given sklearn interface above: - # brain is passed in as X and design is passed in as y - m.fit(X=y, y=X) + m.fit(X=X, y=Y) self.naive_U_ = np.cov(m.coef_.T) naiveRSA_L = np.linalg.cholesky(self.naive_U_) self.naive_C_ = cov2corr(self.naive_U_) - self.L_full = tf.Variable(naiveRSA_L, name="L_full", dtype="float64") - L_indeterminate = tf.linalg.band_part(self.L_full, -1, 0) - self.L = tf.linalg.set_diag( - L_indeterminate, tf.exp(tf.linalg.diag_part(L_indeterminate)) + self.L_flat = tf.Variable( + flatten_cholesky_unique(naiveRSA_L), name="L_flat", dtype="float64" ) - self.train_variables.extend([self.L_full]) - - self.x_stack = tf.concat([tf.matmul(self.X, self.L), self.X_0], 1) - self.sess.run(tf.compat.v1.global_variables_initializer(), feed_dict=feed_dict) - - optimizer = ScipyOptimizerInterface( - -self.logp(), - var_list=self.train_variables, - method=self.optMethod, - options=self.optCtrl, + self.train_variables.extend([self.L_flat]) + + # logging_ops.append( + # tf.print( + # "min(grad): ", + # tf.reduce_min(input_tensor=optimizer._packed_loss_grad), + # output_stream=tflog.info, + # ) + # ) + # logging_ops.append( + # tf.print( + # "max(grad): ", + # tf.reduce_max(input_tensor=optimizer._packed_loss_grad), + # output_stream=tflog.info, + # ) + # ) + # logging_ops.append(tf.print("logp", self.logp(), output_stream=tflog.info)) + # self.sess.run(logging_ops, feed_dict=feed_dict) + val_and_grad = make_val_and_grad(self, extra_args=(X, Y)) + x0 = pack_trainable_vars(self.train_variables) + + opt_results = minimize( + fun=val_and_grad, x0=x0, args=(X, Y), jac=True, method="L-BFGS-B" ) - logging_ops = [] - logging_ops.append(tf.print("min(grad): ", tf.reduce_min( - input_tensor=optimizer._packed_loss_grad), output_stream=tflog.info)) - logging_ops.append(tf.print("max(grad): ", tf.reduce_max( - input_tensor=optimizer._packed_loss_grad), output_stream=tflog.info)) - logging_ops.append( - tf.print("logp", self.logp(), output_stream=tflog.info)) - self.sess.run(logging_ops, feed_dict=feed_dict) + unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) + for var, val in zip(self.train_variables, unpacked_theta): + var = val - optimizer.minimize(session=self.sess, feed_dict=feed_dict) - - 
self.L_ = self.L.eval(session=self.sess) - self.X_0_ = self.X_0.eval(session=self.sess) - self.U_ = self.L_.dot(self.L_.T) + self.U_ = self.L.numpy().dot(self.L.numpy().T) self.C_ = cov2corr(self.U_) - def logp(self): + def logp(self, X, Y): """ MNRSA Log-likelihood""" rsa_cov = CovIdentity(size=self.n_c + self.n_nureg) - + x_stack = tf.concat([tf.matmul(X, self.L), self.X_0], 1) return ( self.time_cov.logp + self.space_cov.logp + rsa_cov.logp + matnorm_logp_marginal_row( - self.Y, + Y, row_cov=self.time_cov, col_cov=self.space_cov, - marg=self.x_stack, + marg=x_stack, marg_cov=rsa_cov, ) ) From b041460b37534a906d94486309ba14bacb229288 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 16:42:50 -0700 Subject: [PATCH 42/84] final removal of tf print and session stuff --- brainiak/matnormal/matnormal_likelihoods.py | 58 ++++++++++----------- brainiak/matnormal/mnrsa.py | 24 ++------- tests/matnormal/test_matnormal_utils.py | 7 ++- 3 files changed, 36 insertions(+), 53 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 0adf07fa3..91f0e093b 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -53,21 +53,24 @@ def solve_det_marginal(x, sigma, A, Q): # For diagnostics, we want to check condition numbers # of things we invert. This includes Q and Sigma, as well # as the "lemma factor" for lack of a better definition - if logging.getLogger().isEnabledFor(logging.DEBUG): - logging.log(logging.DEBUG, - "Printing diagnostics for solve_det_marginal") - A = tf.compat.v1.Print(A, [_condition(Q._prec + tf.matmul(A, sigma.solve(A), - transpose_a=True))], - "lemma_factor condition") - A = tf.compat.v1.Print(A, [_condition(Q._cov)], "Q condition") - A = tf.compat.v1.Print(A, [_condition(sigma._cov)], "sigma condition") - A = tf.compat.v1.Print(A, [tf.reduce_max(input_tensor=A), tf.reduce_min(input_tensor=A)], "A minmax") + logging.log(logging.DEBUG, "Printing diagnostics for solve_det_marginal") + logging.log( + logging.DEBUG, + f"lemma_factor condition={_condition(Q._prec + tf.matmul(A, sigma.solve(A),transpose_a=True))}", + ) + logging.log(logging.DEBUG, f"Q condition={_condition(Q._cov)}") + logging.log(logging.DEBUG, f"sigma condition={_condition(sigma._cov)}") + logging.log( + logging.DEBUG, + f"sigma max={tf.reduce_max(input_tensor=A)}, sigma min={tf.reduce_min(input_tensor=A)}", + ) # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like # a schur complement by isn't, so we call it the "lemma factor" # since we use it in woodbury and matrix determinant lemmas - lemma_factor = tlinalg.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), - transpose_a=True)) + lemma_factor = tlinalg.cholesky( + Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True) + ) logdet = ( Q.logdet @@ -75,14 +78,12 @@ def solve_det_marginal(x, sigma, A, Q): + 2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor))) ) - if logging.getLogger().isEnabledFor(logging.DEBUG): - logdet = tf.compat.v1.Print(logdet, [Q.logdet], "Q logdet") - logdet = tf.compat.v1.Print(logdet, [sigma.logdet], "sigma logdet") - logdet = tf.compat.v1.Print( - logdet, - [2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor)))], - "iqf logdet", - ) + logging.log(logging.DEBUG, f"Log-determinant of Q={Q.logdet}") + logging.log(logging.DEBUG, f"sigma logdet={sigma.logdet}") + logging.log( + logging.DEBUG, + f"lemma factor logdet={2* 
tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor)))}", + ) # A' Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) @@ -125,7 +126,8 @@ def solve_det_conditional(x, sigma, A, Q): # (Q - A' Sigma^{-1} A) lemma_factor = tlinalg.cholesky( - Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True)) + Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True) + ) logdet = ( -Q.logdet @@ -168,16 +170,14 @@ def _mnorm_logp_internal( """ log2pi = 1.8378770664093453 - if logging.getLogger().isEnabledFor(logging.DEBUG): - solve_row = tf.compat.v1.Print( - solve_row, [tlinalg.trace(solve_col)], "coltrace") - solve_row = tf.compat.v1.Print( - solve_row, [tlinalg.trace(solve_row)], "rowtrace") - solve_row = tf.compat.v1.Print(solve_row, [logdet_row], "logdet_row") - solve_row = tf.compat.v1.Print(solve_row, [logdet_col], "logdet_col") + logging.log(logging.DEBUG, f"column precision trace ={tlinalg.trace(solve_col)}") + logging.log(logging.DEBUG, f"row precision trace ={tlinalg.trace(solve_row)}") + logging.log(logging.DEBUG, f"row cov logdet ={logdet_row}") + logging.log(logging.DEBUG, f"col cov logdet ={logdet_col}") - denominator = (-rowsize * colsize * log2pi - - colsize * logdet_row - rowsize * logdet_col) + denominator = ( + -rowsize * colsize * log2pi - colsize * logdet_row - rowsize * logdet_col + ) numerator = -tlinalg.trace(tf.matmul(solve_col, solve_row)) return 0.5 * (numerator + denominator) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index c7fc36243..4ff7d574a 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -83,11 +83,11 @@ def __init__( self.train_variables.extend(self.time_cov.get_optimize_vars()) self.train_variables.extend(self.space_cov.get_optimize_vars()) - # create a tf session we reuse for this object - self.sess = tf.compat.v1.Session() - @property def L(self): + """ + Cholesky factor of the RSA matrix. + """ return unflatten_cholesky_unique(self.L_flat) def fit(self, X, y, structured_RSA_cov=None): @@ -111,7 +111,7 @@ def fit(self, X, y, structured_RSA_cov=None): """ # In the method signature we follow sklearn discriminative API - # where brain is X and behavior is y. Internally we are + # where brain is X and behavior is y. 
Internally we are # generative so we flip this here X, Y = y, X @@ -130,22 +130,6 @@ def fit(self, X, y, structured_RSA_cov=None): self.train_variables.extend([self.L_flat]) - # logging_ops.append( - # tf.print( - # "min(grad): ", - # tf.reduce_min(input_tensor=optimizer._packed_loss_grad), - # output_stream=tflog.info, - # ) - # ) - # logging_ops.append( - # tf.print( - # "max(grad): ", - # tf.reduce_max(input_tensor=optimizer._packed_loss_grad), - # output_stream=tflog.info, - # ) - # ) - # logging_ops.append(tf.print("logp", self.logp(), output_stream=tflog.info)) - # self.sess.run(logging_ops, feed_dict=feed_dict) val_and_grad = make_val_and_grad(self, extra_args=(X, Y)) x0 = pack_trainable_vars(self.train_variables) diff --git a/tests/matnormal/test_matnormal_utils.py b/tests/matnormal/test_matnormal_utils.py index 1a571139a..fd6424e48 100644 --- a/tests/matnormal/test_matnormal_utils.py +++ b/tests/matnormal/test_matnormal_utils.py @@ -9,7 +9,6 @@ def test_pack_unpack(): shape=shape, seed=[0, 0]) for shape in shapes] flatmats = pack_trainable_vars(mats) unflatmats = unpack_trainable_vars(flatmats, mats) - with tf.compat.v1.Session() as sess: - for mat_in, mat_out in zip(mats, unflatmats): - assert tf.math.reduce_all( - tf.equal(mat_in, mat_out)).eval(session=sess) + for mat_in, mat_out in zip(mats, unflatmats): + assert tf.math.reduce_all( + tf.equal(mat_in, mat_out)) From cee8c8159fce84780a38f877de6121ba60e9cced Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 18:44:12 -0700 Subject: [PATCH 43/84] typo fixes --- brainiak/matnormal/__init__.py | 4 ++-- brainiak/matnormal/covs.py | 7 +++---- brainiak/utils/kronecker_solvers.py | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index d9fc27056..e167ac329 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -58,7 +58,7 @@ The `brainiak.matnormal` package provides structure to infer models that can be stated in the matrix-normal notation that are useful for fMRI analysis. -It provides a few interfaces. `MatnormModelBase` is intended is intended as a +It provides a few interfaces. `MatnormModelBase` is intended as a base class for matrix-variate models. It provides a wrapper for the tensorflow optimizer that provides convergence checks based on thresholds on the function value and gradient, and simple verbose outputs. It also provides an interface @@ -85,7 +85,7 @@ \\Sigma_{\\mathbf{Z}_i}, \\Sigma_{\\mathbf{Z}_k})\\\\ -We vectorize, and covert to a form we recognize as +We vectorize, and convert to a form we recognize as $y \\sim \\mathcal{N}(Mx+b, \\Sigma)$. .. 
math:: diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 9b28d10f3..8fe2aa056 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -165,10 +165,10 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): if sigma is None: self.log_sigma = tf.Variable( - tf.random.normal([1], dtype=tf.float64), name="sigma" + tf.random.normal([1], dtype=tf.float64), name="log_sigma" ) else: - self.log_sigma = tf.Variable(np.log(sigma), name="sigma") + self.log_sigma = tf.Variable(np.log(sigma), name="log_sigma") if rho is None: self.rho_unc = tf.Variable( @@ -184,11 +184,10 @@ def logdet(self): """ # first, unconstrain rho and sigma rho = 2 * tf.sigmoid(self.rho_unc) - 1 - sigma = tf.exp(self.log_sigma) # now compute logdet return tf.reduce_sum( input_tensor=2 * tf.constant(self.run_sizes, dtype=tf.float64) * - tf.math.log(sigma) + self.log_sigma - tf.math.log(1 - tf.square(rho)) ) diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py index ce32c12db..99d5d2d01 100644 --- a/brainiak/utils/kronecker_solvers.py +++ b/brainiak/utils/kronecker_solvers.py @@ -101,7 +101,7 @@ def tf_solve_upper_triangular_kron(L, y): def tf_kron_mult(L, x): """ Tensorflow multiply with kronecker product matrix - Returs kron(L[0], L[1] ...) * x + Returns kron(L[0], L[1] ...) * x Arguments --------- From 6e9508266308b64fd6dcc7e0f7123ee8b48c43e8 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 18:44:18 -0700 Subject: [PATCH 44/84] likelihood docstrings --- brainiak/matnormal/matnormal_likelihoods.py | 140 +++++++++++++++----- 1 file changed, 107 insertions(+), 33 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 0adf07fa3..22b921bf4 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -54,20 +54,26 @@ def solve_det_marginal(x, sigma, A, Q): # of things we invert. 
This includes Q and Sigma, as well # as the "lemma factor" for lack of a better definition if logging.getLogger().isEnabledFor(logging.DEBUG): - logging.log(logging.DEBUG, - "Printing diagnostics for solve_det_marginal") - A = tf.compat.v1.Print(A, [_condition(Q._prec + tf.matmul(A, sigma.solve(A), - transpose_a=True))], - "lemma_factor condition") + logging.log(logging.DEBUG, "Printing diagnostics for solve_det_marginal") + A = tf.compat.v1.Print( + A, + [_condition(Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True))], + "lemma_factor condition", + ) A = tf.compat.v1.Print(A, [_condition(Q._cov)], "Q condition") A = tf.compat.v1.Print(A, [_condition(sigma._cov)], "sigma condition") - A = tf.compat.v1.Print(A, [tf.reduce_max(input_tensor=A), tf.reduce_min(input_tensor=A)], "A minmax") + A = tf.compat.v1.Print( + A, + [tf.reduce_max(input_tensor=A), tf.reduce_min(input_tensor=A)], + "A minmax", + ) # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like - # a schur complement by isn't, so we call it the "lemma factor" + # a schur complement but isn't, so we call it the "lemma factor" # since we use it in woodbury and matrix determinant lemmas - lemma_factor = tlinalg.cholesky(Q._prec + tf.matmul(A, sigma.solve(A), - transpose_a=True)) + lemma_factor = tlinalg.cholesky( + Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True) + ) logdet = ( Q.logdet @@ -80,7 +86,12 @@ def solve_det_marginal(x, sigma, A, Q): logdet = tf.compat.v1.Print(logdet, [sigma.logdet], "sigma logdet") logdet = tf.compat.v1.Print( logdet, - [2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor)))], + [ + 2 + * tf.reduce_sum( + input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor)) + ) + ], "iqf logdet", ) @@ -125,7 +136,8 @@ def solve_det_conditional(x, sigma, A, Q): # (Q - A' Sigma^{-1} A) lemma_factor = tlinalg.cholesky( - Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True)) + Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True) + ) logdet = ( -Q.logdet @@ -170,14 +182,17 @@ def _mnorm_logp_internal( if logging.getLogger().isEnabledFor(logging.DEBUG): solve_row = tf.compat.v1.Print( - solve_row, [tlinalg.trace(solve_col)], "coltrace") + solve_row, [tlinalg.trace(solve_col)], "coltrace" + ) solve_row = tf.compat.v1.Print( - solve_row, [tlinalg.trace(solve_row)], "rowtrace") + solve_row, [tlinalg.trace(solve_row)], "rowtrace" + ) solve_row = tf.compat.v1.Print(solve_row, [logdet_row], "logdet_row") solve_row = tf.compat.v1.Print(solve_row, [logdet_col], "logdet_col") - denominator = (-rowsize * colsize * log2pi - - colsize * logdet_row - rowsize * logdet_col) + denominator = ( + -rowsize * colsize * log2pi - colsize * logdet_row - rowsize * logdet_col + ) numerator = -tlinalg.trace(tf.matmul(solve_col, solve_row)) return 0.5 * (numerator + denominator) @@ -220,7 +235,7 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): .. math:: X \\sim \\mathcal{MN}(0, Q, C)\\ Y \\mid \\X \\sim \\mathcal{MN}(AX, R, C),\\ - Y \\sim \\mathcal{MN}(0, R + AQA, C) + Y \\sim \\mathcal{MN}(0, R + AQA', C) This function efficiently computes the marginals by unpacking some info in the covariance classes and then dispatching to solve_det_marginal. @@ -230,14 +245,13 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): x: tf.Tensor Observation tensor row_cov: CovBase - Row covariance implementing the CovBase API + Row covariance implementing the CovBase API (:math:`R` above). 
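# --- Quick numerical check (illustrative, not part of the patch) of the matrix
# determinant lemma that solve_det_marginal applies via lemma_factor:
#   log|Sigma + A Q A'| = log|Q| + log|Sigma| + log|Q^{-1} + A' Sigma^{-1} A|
import numpy as np
from scipy.stats import wishart

n, k = 6, 3
Sigma = wishart.rvs(df=n + 2, scale=np.eye(n))
Q = wishart.rvs(df=k + 2, scale=np.eye(k))
A = np.random.randn(n, k)

lhs = np.linalg.slogdet(Sigma + A @ Q @ A.T)[1]
lemma = np.linalg.slogdet(np.linalg.inv(Q) + A.T @ np.linalg.solve(Sigma, A))[1]
rhs = np.linalg.slogdet(Q)[1] + np.linalg.slogdet(Sigma)[1] + lemma
np.testing.assert_allclose(lhs, rhs, rtol=1e-8)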
col_cov: CovBase - Column Covariance implementing the CovBase API + Column Covariance implementing the CovBase API (:math:`C` above). marg: tf.Tensor - Marginal factor + Marginal factor (:math:`A` above). marg_cov: CovBase - Prior covariance implementing the CovBase API - + Prior covariance implementing the CovBase API (:math:`Q` above). """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") colsize = tf.cast(tf.shape(input=x)[1], "float64") @@ -269,13 +283,13 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): x: tf.Tensor Observation tensor row_cov: CovBase - Row covariance implementing the CovBase API + Row covariance implementing the CovBase API (:math:`R` above). col_cov: CovBase - Column Covariance implementing the CovBase API + Column Covariance implementing the CovBase API (:math:`C` above). marg: tf.Tensor - Marginal factor + Marginal factor (:math:`A` above). marg_cov: CovBase - Prior covariance implementing the CovBase API + Prior covariance implementing the CovBase API (:math:`Q` above). """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") @@ -295,8 +309,42 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): """ - """ + Log likelihood for centered conditional matrix-variate normal density. + + Consider the following partitioned matrix-normal density: + .. math:: + \begin{bmatrix} + \operatorname{vec}\left[\mathbf{X}_{i j}\right] \\ + \operatorname{vec}\left[\mathbf{Y}_{i k}\right] + \end{bmatrix} \sim \mathcal{N}\left(0,\begin{bmatrix} + \Sigma_{j} \otimes \Sigma_{i} & \Sigma_{j k} \otimes \Sigma_{i} \\ + \Sigma_{k j} \otimes \Sigma_{i} & \Sigma_{k} \otimes \Sigma_{i} + \end{bmatrix}\right) + + Then we can write the conditional: + .. math :: + \mathbf{X}^{\top} j i \mid \mathbf{Y}_{k i}^{\top} \sim \mathcal{M}\\ + \mathcal{N}\left(0, \Sigma_{j}-\Sigma_{j k} \Sigma_{k}^{-1} \Sigma_{k j},\\ + \Sigma_{i}\right) + + This function efficiently computes the conditionals by unpacking some + info in the covariance classes and then dispatching to + solve_det_conditional. + Parameters + --------------- + x: tf.Tensor + Observation tensor + row_cov: CovBase + Row covariance (:math:`\Sigma_{i}` in the notation above). + col_cov: CovBase + Column covariance (:math:`\Sigma_{j}` in the notation above). + cond: tf.Tensor + Off-diagonal block of the partitioned covariance (:math:`\Sigma_{jk}` in the notation above). + cond_cov: CovBase + Covariance of conditioning variable (:math:`\Sigma_{k}` in the notation above). + + """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") colsize = tf.cast(tf.shape(input=x)[1], "float64") @@ -312,15 +360,41 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): """ - Log likelihood for centered matrix-variate normal density. Assumes that - row_cov, col_cov, and cond_cov follow the API defined in CovBase. + Log likelihood for centered conditional matrix-variate normal density. - When you go from joint to conditional in mnorm, you end up with a - covariance S - APA', where P is the covariance of A in the relevant - dimension. + Consider the following partitioned matrix-normal density: + .. 
math:: + \begin{bmatrix} + \operatorname{vec}\left[\mathbf{X}_{i j}\right] \\ + \operatorname{vec}\left[\mathbf{Y}_{i k}\right] + \end{bmatrix} \sim \mathcal{N}\left(0,\begin{bmatrix} + \Sigma_{j} \otimes \Sigma_{i} & \Sigma_{j k} \otimes \Sigma_{i} \\ + \Sigma_{k j} \otimes \Sigma_{i} & \Sigma_{k} \otimes \Sigma_{i} + \end{bmatrix}\right) + + Then we can write the conditional: + .. math :: + \mathbf{X}_{i j} \mid \mathbf{Y}_{i k} \sim \mathcal{M}\\ + \mathcal{N}\left(0, \Sigma_{i}, \Sigma_{j}-\Sigma_{j k}\\ + \Sigma_{k}^{-1} \Sigma_{k j}\right) + + This function efficiently computes the conditionals by unpacking some + info in the covariance classes and then dispatching to + solve_det_conditional. + + Parameters + --------------- + x: tf.Tensor + Observation tensor + row_cov: CovBase + Row covariance (:math:`\Sigma_{i}` in the notation above). + col_cov: CovBase + Column covariance (:math:`\Sigma_{j}` in the notation above). + cond: tf.Tensor + Off-diagonal block of the partitioned covariance (:math:`\Sigma_{jk}` in the notation above). + cond_cov: CovBase + Covariance of conditioning variable (:math:`\Sigma_{k}` in the notation above). - This method exploits the matrix inversion and determinant lemmas to - construct S - APA' given the covariance API in in CovBase. """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") colsize = tf.cast(tf.shape(input=x)[1], "float64") From 12eff1fceeb32906624af3cfe815bf8f90210338 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 19:39:20 -0700 Subject: [PATCH 45/84] simplify make_val_and_grad --- brainiak/matnormal/mnrsa.py | 8 ++++---- brainiak/matnormal/regression.py | 7 +++---- brainiak/matnormal/utils.py | 17 ++++++----------- 3 files changed, 13 insertions(+), 19 deletions(-) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 4ff7d574a..57c928ab0 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -130,12 +130,12 @@ def fit(self, X, y, structured_RSA_cov=None): self.train_variables.extend([self.L_flat]) - val_and_grad = make_val_and_grad(self, extra_args=(X, Y)) + lossfn = lambda theta: -self.logp(X, y) + val_and_grad = make_val_and_grad(lossfn, self.train_variables) + x0 = pack_trainable_vars(self.train_variables) - opt_results = minimize( - fun=val_and_grad, x0=x0, args=(X, Y), jac=True, method="L-BFGS-B" - ) + opt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method="L-BFGS-B") unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) for var, val in zip(self.train_variables, unpacked_theta): diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 81f7e24f7..1ff5b8580 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -76,12 +76,11 @@ def fit(self, X, y): self.train_variables.extend(self.time_cov.get_optimize_vars()) self.train_variables.extend(self.space_cov.get_optimize_vars()) - val_and_grad = make_val_and_grad(self, extra_args=(X, y)) + lossfn = lambda theta: -self.logp(X, y) + val_and_grad = make_val_and_grad(lossfn, self.train_variables) x0 = pack_trainable_vars(self.train_variables) - opt_results = minimize( - fun=val_and_grad, x0=x0, args=(X, y), jac=True, method="L-BFGS-B" - ) + opt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method="L-BFGS-B") unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) for var, val in zip(self.train_variables, unpacked_theta): diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 
581306290..7413c21c0 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -79,18 +79,13 @@ def unpack_trainable_vars(x, trainable_vars): return [tf.reshape(fv, tv.shape) for fv, tv in zip(flatvars, trainable_vars)] -def make_val_and_grad(model, lossfn=None, extra_args=None, train_vars=None): - - if train_vars is None: - train_vars = model.train_variables - - if lossfn is None: - lossfn = lambda theta: -model.logp(*extra_args) - - if extra_args is None: - extra_args = {} +def make_val_and_grad(lossfn, train_vars): + """ + Makes a function that ouptuts the loss and gradient in a format compatible + with scipy.optimize.minimize + """ - def val_and_grad(theta, *extra_args): + def val_and_grad(theta): with tf.GradientTape() as tape: tape.watch(train_vars) unpacked_theta = unpack_trainable_vars(theta, train_vars) From 41eda2b9ce524326ac7692d6d42cc3f9a757af24 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:13:01 -0700 Subject: [PATCH 46/84] fix old tf1 stuff that would break in tf2 --- brainiak/matnormal/covs.py | 11 ++++++----- tests/matnormal/test_cov.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 18609cb14..2fe1c8e66 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -290,9 +290,6 @@ def __init__(self, size, diag_var=None): else: self.logprec = tf.Variable(np.log(1 / diag_var), name="log-precisions") - self.prec = tf.exp(self.logprec) - self.prec_dimaugmented = tf.expand_dims(self.prec, -1) - @property def logdet(self): return -tf.reduce_sum(input_tensor=self.logprec) @@ -312,7 +309,9 @@ def solve(self, X): Tensor to multiply by inverse of this covariance """ - return tf.multiply(self.prec_dimaugmented, X) + prec = tf.exp(self.logprec) + prec_dimaugmented = tf.expand_dims(prec, -1) + return tf.multiply(prec_dimaugmented, X) class CovDiagonalGammaPrior(CovDiagonal): @@ -327,7 +326,7 @@ def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): scale=tf.constant(beta, dtype=tf.float64), ) - self.logp = tf.reduce_sum(input_tensor=self.ig.log_prob(self.prec)) + self.logp = tf.reduce_sum(input_tensor=self.ig.log_prob(tf.exp(self.logprec))) class CovUnconstrainedCholesky(CovBase): @@ -359,6 +358,8 @@ def __init__(self, size=None, Sigma=None): L = np.linalg.cholesky(Sigma) self.L_flat = tf.Variable(flatten_cholesky_unique(L), name="L_flat") + self.optimize_vars = [self.L_flat] + @property def L(self): return unflatten_cholesky_unique(self.L_flat) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index f453fabf5..515d6c96a 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -111,7 +111,7 @@ def test_CovDiagonal(): cov = CovDiagonal(size=m) # compute the naive version - cov_np = np.diag(1 / cov.prec) + cov_np = cov._cov logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) From 3f6dde5df64318237f2865c885944a77062b2322 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:13:28 -0700 Subject: [PATCH 47/84] fix for maintaining the graph correctly --- brainiak/matnormal/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 7413c21c0..259c85a01 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -90,7 +90,7 @@ def val_and_grad(theta): tape.watch(train_vars) 
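# --- Runnable sketch (not part of the patch) of how make_val_and_grad,
# pack_trainable_vars, and unpack_trainable_vars are meant to be driven,
# mirroring the estimators' fit() methods on a toy quadratic loss.
import numpy as np
import tensorflow as tf
from scipy.optimize import minimize
from brainiak.matnormal.utils import (
    make_val_and_grad, pack_trainable_vars, unpack_trainable_vars)

w = tf.Variable(tf.random.normal([3], dtype=tf.float64))
b = tf.Variable(tf.random.normal([1], dtype=tf.float64))
train_vars = [w, b]
target_w, target_b = np.array([1.0, -2.0, 0.5]), np.array([3.0])

# lossfn ignores theta: make_val_and_grad assigns the unpacked parameters into
# the variables before evaluating the loss, just as fit() does in the models
lossfn = lambda theta: (tf.reduce_sum((w - target_w) ** 2)
                        + tf.reduce_sum((b - target_b) ** 2))

val_and_grad = make_val_and_grad(lossfn, train_vars)
x0 = pack_trainable_vars(train_vars)
res = minimize(fun=val_and_grad, x0=x0, jac=True, method="L-BFGS-B")

# write the optimum back into the TensorFlow variables
for var, val in zip(train_vars, unpack_trainable_vars(res.x, train_vars)):
    var.assign(val)

np.testing.assert_allclose(w.numpy(), target_w, atol=1e-4)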
unpacked_theta = unpack_trainable_vars(theta, train_vars) for var, val in zip(train_vars, unpacked_theta): - var = val + var.assign(val) loss = lossfn(theta) grad = tape.gradient(loss, train_vars) packed_grad = pack_trainable_vars(grad) From 429e3295a49d425148fabddeb757cb8e00a0ed00 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:14:12 -0700 Subject: [PATCH 48/84] More stringent tests by doing bad initialization --- brainiak/matnormal/mnrsa.py | 31 ++++++++++++-------- brainiak/matnormal/regression.py | 21 ++++++++----- tests/matnormal/test_matnormal_regression.py | 8 ++--- tests/matnormal/test_matnormal_rsa.py | 2 +- 4 files changed, 36 insertions(+), 26 deletions(-) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 57c928ab0..68e90ab4b 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -90,7 +90,7 @@ def L(self): """ return unflatten_cholesky_unique(self.L_flat) - def fit(self, X, y, structured_RSA_cov=None): + def fit(self, X, y, naive_init=True): """ Estimate dimension reduction and cognitive model parameters Parameters @@ -117,20 +117,25 @@ def fit(self, X, y, structured_RSA_cov=None): self.n_c = X.shape[1] - # initialize from naive RSA - m = LinearRegression(fit_intercept=False) - m.fit(X=X, y=Y) - self.naive_U_ = np.cov(m.coef_.T) - naiveRSA_L = np.linalg.cholesky(self.naive_U_) - self.naive_C_ = cov2corr(self.naive_U_) - - self.L_flat = tf.Variable( - flatten_cholesky_unique(naiveRSA_L), name="L_flat", dtype="float64" - ) + if naive_init: + # initialize from naive RSA + m = LinearRegression(fit_intercept=False) + m.fit(X=X, y=Y) + self.naive_U_ = np.cov(m.coef_.T) + naiveRSA_L = np.linalg.cholesky(self.naive_U_) + self.L_flat = tf.Variable( + flatten_cholesky_unique(naiveRSA_L), name="L_flat", dtype="float64" + ) + else: + + chol_flat_size = (self.n_c * (self.n_c + 1)) // 2 + self.L_flat = tf.Variable( + tf.random.normal([chol_flat_size], dtype="float64"), name="L_flat", dtype="float64" + ) self.train_variables.extend([self.L_flat]) - lossfn = lambda theta: -self.logp(X, y) + lossfn = lambda theta: -self.logp(X, Y) val_and_grad = make_val_and_grad(lossfn, self.train_variables) x0 = pack_trainable_vars(self.train_variables) @@ -139,7 +144,7 @@ def fit(self, X, y, structured_RSA_cov=None): unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) for var, val in zip(self.train_variables, unpacked_theta): - var = val + var.assign(val) self.U_ = self.L.numpy().dot(self.L.numpy().T) self.C_ = cov2corr(self.U_) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 1ff5b8580..7b232c7b3 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -50,25 +50,29 @@ def logp(self, X, Y): resid = Y - y_hat return matnorm_logp(resid, self.time_cov, self.space_cov) - def fit(self, X, y): + def fit(self, X, y, naive_init=True): """ Compute the regression fit. Parameters ---------- X : np.array, TRs by conditions. Design matrix - Y : np.array, TRs by voxels. + y : np.array, TRs by voxels. 
fMRI data """ self.n_c = X.shape[1] - # initialize to the least squares solution (basically all - # we need now is the cov) - sigma_inv_x = self.time_cov.solve(X) - sigma_inv_y = self.time_cov.solve(y) + if naive_init: + # initialize to the least squares solution (basically all + # we need now is the cov) + sigma_inv_x = self.time_cov.solve(X) + sigma_inv_y = self.time_cov.solve(y) + + beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) - beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) + else: + beta_init = np.random.randn(self.n_c, self.n_v) self.beta = tf.Variable(beta_init, name="beta") @@ -83,8 +87,9 @@ def fit(self, X, y): opt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method="L-BFGS-B") unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) + for var, val in zip(self.train_variables, unpacked_theta): - var = val + var.assign(val) self.beta_ = self.beta.numpy() diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index f4338f7b2..ac1d0c7ce 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -38,7 +38,7 @@ def test_matnorm_regression_unconstrained(): model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) - model.fit(X, y) + model.fit(X, y, naive_init=False) assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol @@ -60,7 +60,7 @@ def test_matnorm_regression_unconstrainedprec(): model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) - model.fit(X, Y) + model.fit(X, Y, naive_init=False) assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol @@ -83,7 +83,7 @@ def test_matnorm_regression_optimizerChoice(): model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, optimizer="CG") - model.fit(X, Y) + model.fit(X, Y, naive_init=False) assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol @@ -106,6 +106,6 @@ def test_matnorm_regression_scaledDiag(): model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) - model.fit(X, Y) + model.fit(X, Y, naive_init=False) assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index 6dd0bd18f..ceb52b2a7 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -64,7 +64,7 @@ def test_brsa_rudimentary(): model_matnorm = MNRSA(time_cov=timecov_model, space_cov=spacecov_model) - model_matnorm.fit(tr["Y"], tr["X"]) + model_matnorm.fit(tr["Y"], tr["X"], naive_init=False) RMSE = np.mean((model_matnorm.C_ - cov2corr(tr["U"])) ** 2) ** 0.5 From ad6ce7dfd010f3b70d04dd360d589c6064493f72 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:14:33 -0700 Subject: [PATCH 49/84] Update example with new way of doing things --- examples/matnormal/MN-RSA.ipynb | 242 +++++++++++++++++++++----------- 1 file changed, 163 insertions(+), 79 deletions(-) diff --git a/examples/matnormal/MN-RSA.ipynb b/examples/matnormal/MN-RSA.ipynb index a464927e5..4e1f3e76e 100644 --- a/examples/matnormal/MN-RSA.ipynb +++ b/examples/matnormal/MN-RSA.ipynb @@ -113,7 +113,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 1, @@ -122,9 +122,38 @@ }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADJ9JREFUeJzt3WusZWV9x/HvzxkEBzFAAS8MKWAI\nraEWyKRBbWwj0iIS8EVfoKWZVpN50YtoNAilqWnSNE00XpI2GgIoqYgvECshXpigxjSthGEcrkOF\nAoWB0aExVYIWZsK/L/aaJ4fJjDOz11p7nTPz/SQnZ1/W3v//PnPOb5619nr2k6pCkgBeMXUDkpYP\nA0FSYyBIagwESY2BIKkxECQ1kwdCkguT/GeSR5NcNXKtU5J8N8nWJA8muWLMekvqrkrywyS3L6DW\nsUluSfJw9zrfMnK9D3c/yweS3JzkqIGf/4YkO5I8sOS245NsTPJI9/24ket9ovt53pfka0mOHbPe\nkvs+mqSSnDBUvf2ZNBCSrAL+GXgX8CbgvUneNGLJXcBHquo3gfOAvxi53m5XAFsXUAfgs8C3quo3\ngN8es26Sk4EPAuuq6ixgFXDZwGW+CFy4x21XAXdW1RnAnd31MettBM6qqjcDPwKuHrkeSU4BLgCe\nHLDWfk09Qvgd4NGqeqyqXgS+Alw6VrGq2l5Vm7vLzzH7Yzl5rHoASdYC7wauG7NOV+s1wNuB6wGq\n6sWq+t+Ry64GXpVkNbAGeGbIJ6+q7wM/3ePmS4Ebu8s3Au8Zs15V3VFVu7qrPwDWjlmv82ngSmCh\nZw5OHQgnA08tub6Nkf9Ad0tyKnAOcNfIpT7D7B/2pZHrAJwOPAt8odtFuS7J0WMVq6qngU8y+19s\nO/CzqrpjrHpLvLaqtnc9bAdOWkDN3d4PfHPMAkkuAZ6uqnvHrLM3UwdC9nLb6ImY5NXAV4EPVdXP\nR6xzMbCjqu4Zq8YeVgPnAp+rqnOA5xl2OP0y3b77pcBpwBuAo5NcPla9qSW5htlu500j1lgDXAP8\n7Vg1fpWpA2EbcMqS62sZeMi5pyRHMAuDm6rq1jFrAW8DLknyBLPdoXck+dKI9bYB26pq96jnFmYB\nMZZ3Ao9X1bNVtRO4FXjriPV2+0mS1wN033eMXTDJeuBi4I9r3AlAb2QWsPd2vzdrgc1JXjdizWbq\nQLgbOCPJaUleyeyA1G1jFUsSZvvXW6vqU2PV2a2qrq6qtVV1KrPX9p2qGu1/0Kr6MfBUkjO7m84H\nHhqrHrNdhfOSrOl+tuezmIOntwHru8vrga+PWSzJhcDHgEuq6hdj1qqq+6vqpKo6tfu92Qac2/3b\njq+qJv0CLmJ25Pa/gGtGrvW7zHZJ7gO2dF8XLeh1/j5w+wLqnA1s6l7jvwLHjVzv74CHgQeAfwGO\nHPj5b2Z2fGInsz+ODwC/xuzdhUe678ePXO9RZse6dv/OfH7Menvc/wRwwti/N7u/0hWVpMl3GSQt\nIwaCpMZAkNQYCJIaA0FSs2wCIckG61lvudU6HOottWwCAVj0D8F6K7feofzapqjXLKdAkDSxhZ6Y\ntOqYo2v1iXv/bImXnnueVxyz94l5Rz7+y8F72ckLHMGRgz+v9Q6tWodKvf/jeV6sF/Y2mfBlVg9a\ndX/FTjyWtf/w5wf9uNPft2WEbqTDx1115wFt5y6DpKZXICzy8xAljW/uQJjg8xAljazPCGGhn4co\naXx9AmGyz0OUNI4+gXBAn4eYZEOSTUk2vfTc8z3KSRpbn0A4oM9DrKprq2pdVa3b13kGkpaHPoGw\n0M9DlDS+uU9MqqpdSf4S+DazFXtuqKoHB+tM0sL1OlOxqr4BfGOgXiRNzDMVJTULnctw5OO/nGte\nwmNfPnuues6BkA6OIwRJjYEgqTEQJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNgSCpMRAkNQaCpMZA\nkNQsdLbjvOadtegsSengOEKQ1BgIkhoDQVLTZym3U5J8N8nWJA8muWLIxiQtXp+DiruAj1TV5iTH\nAPck2VhVDw3Um6QFm3uEUFXbq2pzd/k5YCsu5SataIMcQ0hyKnAOcNcQzydpGr3PQ0jyauCrwIeq\n6ud7uX8DsAHgKNb0LSdpRL1GCEmOYBYGN1XVrXvbZunajkdwZJ9ykkbW512GANcDW6vqU8O1JGkq\nfUYIbwP+BHhHki3d10UD9SVpAn0We/03IAP2ImlinqkoqVkRsx3n5SxJ6eA4QpDUGAiSGgNBUmMg\nSGoMBEmNgSCpMRAkNQaCpMZAkNQYCJIaA0FSYyBIagwESc0hPdtxXs6S1OHKEYKkxkCQ1BgIkpre\ngZBkVZIfJrl9iIYkTWeIEcIVzJZxk7TC9V2oZS3wbuC6YdqRNKW+I4TPAFcCLw3Qi6SJ9Vm56WJg\nR1Xds5/tNiTZlGTTTl6Yt5ykBei7ctMlSZ4AvsJsBacv7bmRaztKK8fcgVBVV1fV2qo6FbgM+E5V\nXT5YZ5IWzvMQJDWDzGWoqu8B3xviuSRNxxGCpMbZjgNylqRWOkcIkhoDQVJjIEhqDARJjYEgqTEQ\nJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNgSCpcbbjMuAsSS0XjhAkNQaCpMZAkNT0Xbnp2CS3JHk4\nydYkbxmqMUmL1/eg4meBb1XVHyV5JbBmgJ4kTWTuQEjyGuDtwJ8CVNWLwIvDtCVpCn12GU4HngW+\n0C0Hf12SowfqS9IE+gTCauBc4HNVdQ7wPHDVnhu5tqO0cvQJhG3Atqq6q7t+C7OAeBnXdpRWjj5r\nO/4YeCrJmd1N5wMPDdKVpEn0fZfhr4CbuncYHgP+rH9LkqbSKxCqaguwbqBeJE3MMxUlNc52XMGc\nJamhOUKQ1BgIkhoDQVJjIEhqDARJjYEgqTEQJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNsx0PQ/PO\nWvz2M/M97m92/NZcj1u0u89eNXULk3OEIKkxECQ1BoKkpu/ajh9O8mCSB5LcnOSooRqTtHhzB0KS\nk4EPAuuq6ixgFXDZUI1JWry+uwyrgVclWc1soddn+rckaSp9Fmp5Gvgk8CSwHfhZVd0xVGOSFq/P\nLsNxwKXAacAbgKOTXL6X7VzbUVoh+uwyvBN4vKqeraqdwK3AW/fcyLUdpZWjTyA8CZyXZE2SMFvb\nceswbUmaQp9jCHcxW/F5M3B/91zXDtSXpAn0Xdvx48DHB+pF0sQ8U1FS42xHHbB5Zy3+/Un3L7Se\n5ucIQVJjIEhqDARJjYEgqTEQJDUGgqTGQJDUGAiSGgNBUmMgSGoMBEmNgSCpMRAkNc521OicJbly\nOEKQ1BgIkhoDQVKz30BIckOSHUkeWHLb8Uk2Jnmk+37cuG1KWoQDGSF8Ebhwj9uuAu6sqjOAO7vr\nkla4/QZCVX0f+OkeN18K3NhdvhF4z8B9SZrAvMcQXltV
2wG67ycN15KkqYx+HkKSDcAGgKNYM3Y5\nST3MO0L4SZLXA3Tfd+xrQ9d2lFaOeQPhNmB9d3k98PVh2pE0pQN52/Fm4D+AM5NsS/IB4B+BC5I8\nAlzQXZe0wu33GEJVvXcfd50/cC+SJuaZipIaZztq2Vr0LMk/5Oy5HncocYQgqTEQJDUGgqTGQJDU\nGAiSGgNBUmMgSGoMBEmNgSCpMRAkNQaCpMZAkNQYCJIaZzvqgN199qqpWzgg885afOzL8z3u9Pdt\nmetxy5EjBEmNgSCpMRAkNfOu7fiJJA8nuS/J15IcO26bkhZh3rUdNwJnVdWbgR8BVw/cl6QJzLW2\nY1XdUVW7uqs/ANaO0JukBRviGML7gW/u684kG5JsSrJpJy8MUE7SWHoFQpJrgF3ATfvaxqXcpJVj\n7hOTkqwHLgbOr6oariVJU5krEJJcCHwM+L2q+sWwLUmayrxrO/4TcAywMcmWJJ8fuU9JCzDv2o7X\nj9CLpIl5pqKkxtmOUmfeWYuH0ixJRwiSGgNBUmMgSGoMBEmNgSCpMRAkNQaCpMZAkNQYCJIaA0FS\nYyBIagwESY2BIKlxtqPU06E0S9IRgqTGQJDUzLWU25L7PpqkkpwwTnuSFmnepdxIcgpwAfDkwD1J\nmshcS7l1Pg1cCbgmg3SImOsYQpJLgKer6t6B+5E0oYN+2zHJGuAa4A8OcPsNwAaAo1hzsOUkLdA8\nI4Q3AqcB9yZ5gtnKz5uTvG5vG7u2o7RyHPQIoaruB07afb0LhXVV9T8D9iVpAvMu5SbpEDTvUm5L\n7z91sG4kTcozFSU1BoKkxtmO0kQWOUvyhb/+9wPazhGCpMZAkNQYCJIaA0FSYyBIagwESY2BIKkx\nECQ1BoKkxkCQ1BgIkhoDQVJjIEhqUrW4T1FP8izw3/u4+wRgkR/DZr2VW+9Qfm1j1fv1qjpxfxst\nNBB+lSSbqmqd9ay3nGodDvWWcpdBUmMgSGqWUyBcaz3rLcNah0O9ZtkcQ5A0veU0QpA0MQNBUmMg\nSGoMBEmNgSCp+X8Kn/zycjAIDgAAAABJRU5ErkJggg==\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAECCAYAAAD+eGJTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAMsElEQVR4nO3dfcyddX3H8ffHFsEisTAeVEpWNISFMAekWVAXt4hzDAn1j/2BjgWmSf/Yg2hICA/LzJJlMdH4kGzREEDJRPwDUQnxga5qzLLZUGp5LBMGDFqKZTFTUh3Q8N0f52pSm7tre851Xefc/b1fyZ37nHOdc77fc/e+P/1d17l+55eqQlK7XjPvBiTNlyEgNc4QkBpnCEiNMwSkxhkCUuPmHgJJLk7yH0meSHLdwLXOSPL9JI8meSTJ1UPW26/uiiQ/TnLPCLVWJ7kzyWNJtid5+8D1Ptb9LB9OckeS43p+/luT7E7y8H63nZRkY5LHu+8nDlzvk93P88EkX0+yesh6+227JkklObmvekuZawgkWQH8E/DHwDnAB5KcM2DJvcA1VXUOcCHwlwPX2+dqYPsIdQA+B3ynqn4L+J0h6yY5HfgIsK6qzgVWAJf3XOZLwMUH3HYdsKmqzgI2ddeHrLcROLeq3gb8BLh+4HokOQN4L/BMj7WWNO+RwO8CT1TVk1X1MvBVYP1QxapqV1Vt7S6/yOQP5PSh6gEkWQO8D7h5yDpdrTcA7wJuAaiql6vqfwYuuxJ4XZKVwCrguT6fvKp+CPzsgJvXA7d1l28D3j9kvaq6t6r2dld/BKwZsl7nM8C1wOBn8807BE4Hnt3v+g4G/qPcJ8la4Hxg88ClPsvkH/PVgesAnAm8AHyx2/24OcnxQxWrqp3Ap5j8b7UL+HlV3TtUvf2cVlW7usvPA6eNUHOfDwHfHrJAkvXAzqp6YMg6+8w7BOYiyeuBrwEfrapfDFjnUmB3Vd0/VI0DrAQuAD5fVecDe+h3qPxrun3x9UzC583A8UmuGKreUmpy3vso574nuZHJLuXtA9ZYBdwA/O1QNQ407xDYCZyx3/U13W2DSXIMkwC4varuGrIW8E7gsiRPM9nVeXeSLw9Ybwewo6r2jW7uZBIKQ3kP8FRVvVBVrwB3Ae8YsN4+P03yJoDu++6hCya5CrgU+NMadsLNW5mE6gPd780aYGuSNw5VcN4hcB9wVpIzk7yWyUGlu4cqliRM9pe3V9Wnh6qzT1VdX1Vrqmotk9f2vaoa7H/KqnoeeDbJ2d1NFwGPDlWPyW7AhUlWdT/bixjnAOjdwJXd5SuBbw5ZLMnFTHbpLquqXw5Zq6oeqqpTq2pt93uzA7ig+7cdrOhcv4BLmBxx/U/gxoFr/R6ToeODwLbu65KRXucfAPeMUOc8YEv3Gr8BnDhwvb8DHgMeBv4ZOLbn57+DyfGGV7o/iA8Dv8HkXYHHgX8BThq43hNMjl3t+535wpD1Dtj+NHDykP+G6QpJatS8dwckzZkhIDXOEJAaZwhIjTMEpMYtTAgk2WA96y1arRbqLUwIAKO+cOst63pH82sbvd4ihYCkORj1ZKEVJxxfK09Z+vMYXn1xD685YekJb8c+9avee3mFlziGY3t/XusdXbWOlnr/yx5erpey1LaVvVY6hJWnrGbNP/zFET/uLR/cNkA3Ujs216aDbnN3QGrcTCEw5ucDShrG1CEwh88HlDSAWUYCo34+oKRhzBICc/t8QEn9GfzAYJINSbYk2fLqi3uGLifpCM0SAof1+YBVdVNVrauqdQc7D0DS/MwSAqN+PqCkYUx9slBV7U3yV8B3maw8c2tVPdJbZ5JGMdMZg1X1LeBbPfUiaQ48Y1Bq3KhzB4596ldTzQN48ivnTVXPOQfSoTkSkBpnCEiNMwSkxhkCUuMMAalxhoDUOENAapwhIDXOEJAaZwhIjTMEpMYZAlLjDAGpcaPOIpzWtLMBnX0oHZojAalxhoDUOENAatwsy5CdkeT7SR5N8kiSq/tsTNI4ZjkwuBe4pqq2JjkBuD/Jxqp6tKfeJI1g6pFAVe2qqq3d5ReB7bgMmbTs9HJMIMla4Hxgcx/PJ2k8M58nkOT1wNeAj1bVL5bYvgHYAHAcq2YtJ6lnM40EkhzDJABur6q7lrrP/msRHsOxs5STNIBZ3h0IcAuwvao+3V9LksY0y0jgncCfAe9Osq37uqSnviSNZJYFSf8VSI+9SJoDzxiUGrcsZhFOy9mH0qE5EpAaZwhIjTMEpMYZAlLjDAGpcYaA1DhDQGqcISA1zhCQGmcISI0zBKTGGQJS4wwBqXFH9SzCaTn7UC1xJCA1zhCQGmcISI2bOQSSrEjy4yT39NGQpHH1MRK4mskSZJKWoVkXH1kDvA+4uZ92JI1t1pHAZ4FrgVd76EXSHMyyAtGlwO6quv8Q99
uQZEuSLa/w0rTlJA1k1hWILkvyNPBVJisRffnAO7kWobTYpg6Bqrq+qtZU1VrgcuB7VXVFb51JGoXnCUiN62XuQFX9APhBH88laVyOBKTGOYuwR84+1HLkSEBqnCEgNc4QkBpnCEiNMwSkxhkCUuMMAalxhoDUOENAapwhIDXOEJAaZwhIjTMEpMY5i3ABOPtQ8+RIQGqcISA1zhCQGjfrCkSrk9yZ5LEk25O8va/GJI1j1gODnwO+U1V/kuS1wKoeepI0oqlDIMkbgHcBVwFU1cvAy/20JWkss+wOnAm8AHyxW5r85iTH99SXpJHMEgIrgQuAz1fV+cAe4LoD7+RahNJimyUEdgA7qmpzd/1OJqHwa1yLUFpss6xF+DzwbJKzu5suAh7tpStJo5n13YG/Bm7v3hl4Evjz2VuSNKaZQqCqtgHreupF0hx4xqDUOGcRLmPOPlQfHAlIjTMEpMYZAlLjDAGpcYaA1DhDQGqcISA1zhCQGmcISI0zBKTGGQJS4wwBqXGGgNQ4ZxE2aNrZgN99brrH/c3u357qcWO777wV825hLhwJSI0zBKTGGQJS42Zdi/BjSR5J8nCSO5Ic11djksYxdQgkOR34CLCuqs4FVgCX99WYpHHMujuwEnhdkpVMFiN9bvaWJI1plsVHdgKfAp4BdgE/r6p7+2pM0jhm2R04EVjPZGHSNwPHJ7liifu5FqG0wGbZHXgP8FRVvVBVrwB3Ae848E6uRSgttllC4BngwiSrkoTJWoTb+2lL0lhmOSawmclKxFuBh7rnuqmnviSNZNa1CD8OfLynXiTNgWcMSo1zFqEO27SzAf/+1IdGracj40hAapwhIDXOEJAaZwhIjTMEpMYZAlLjDAGpcYaA1DhDQGqcISA1zhCQGmcISI0zBKTGOYtQg3P24WJzJCA1zhCQGmcISI07ZAgkuTXJ7iQP73fbSUk2Jnm8+37isG1KGsrhjAS+BFx8wG3XAZuq6ixgU3dd0jJ0yBCoqh8CPzvg5vXAbd3l24D399yXpJFMe0zgtKra1V1+Hjitp34kjWzmA4NVVUAdbLtrEUqLbdoQ+GmSNwF033cf7I6uRSgttmlD4G7gyu7ylcA3+2lH0tgO5y3CO4B/B85OsiPJh4FPAH+Y5HEmqxN/Ytg2JQ3lkHMHquoDB9l0Uc+9SJoDzxiUGucsQi2ssWcf/hHnTfW45c6RgNQ4Q0BqnCEgNc4QkBpnCEiNMwSkxhkCUuMMAalxhoDUOENAapwhIDXOEJAaZwhIjXMWoQ7bfeetmHcLh2Xa2YBPfmW6x73lg9umetyicCQgNc4QkBpnCEiNm3Ytwk8meSzJg0m+nmT1sG1KGsq0axFuBM6tqrcBPwGu77kvSSOZai3Cqrq3qvZ2V38ErBmgN0kj6OOYwIeAbx9so8uQSYttphBIciOwF7j9YPdxGTJpsU19slCSq4BLgYu6RUklLUNThUCSi4Frgd+vql/225KkMU27FuE/AicAG5NsS/KFgfuUNJBp1yK8ZYBeJM2BZwxKjXMWodSZdjbgcp996EhAapwhIDXOEJAaZwhIjTMEpMYZAlLjDAGpcYaA1DhDQGqcISA1zhCQGmcISI0zBKTGOYtQmtFyn33oSEBqnCEgNW6qZcj223ZNkkpy8jDtSRratMuQkeQM4L3AMz33JGlEUy1D1vkMk48dd80BaRmb6phAkvXAzqp6oOd+JI3siN8iTLIKuIHJrsDh3H8DsAHgOFYdaTlJA5tmJPBW4EzggSRPM1mReGuSNy51Z9cilBbbEY8Equoh4NR917sgWFdV/91jX5JGMu0yZJKOEtMuQ7b/9rW9dSNpdJ4xKDXOEJAa5yxCaU7GnH340g3/dtBtjgSkxhkCUuMMAalxhoDUOENAapwhIDXOEJAaZwhIjTMEpMYZAlLjDAGpcYaA1DhDQGpcqsb7xPAkLwD/dZDNJwNjfkSZ9ZZvvaP5tQ1V7zer6pSlNowaAv+fJFuqap31rLdItVqo5+6A1DhDQGrcIoXATdaz3gLWOurrLcwxAUnzsUgjAUlzYAhIjTMEpMYZAlLjDAGpcf8Hu2X5EIe6s0cAAAAASUVORK5CYII=\n", "text/plain": [ - "" + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "application/javascript": [ + "\n", + " setTimeout(function() {\n", + " var nbb_cell_id = 1;\n", + " var nbb_unformatted_code = \"%load_ext nb_black\\nimport scipy\\nfrom scipy.stats import norm\\nfrom scipy.special import expit as inv_logit\\nimport numpy as np\\nfrom numpy.linalg import cholesky\\nimport matplotlib.pyplot as plt\\n\\n\\ndef rmn(rowcov, colcov):\\n # generate random draws from a zero-mean matrix-normal distribution\\n Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0])))\\n return cholesky(rowcov).dot(Z).dot(cholesky(colcov))\\n\\n\\ndef make_ar1_with_lowrank_covmat(size, rank):\\n \\\"\\\"\\\" Generate a random covariance that is AR1 with added low rank structure\\n \\\"\\\"\\\"\\n sigma = np.abs(norm.rvs())\\n rho = np.random.uniform(-1, 0)\\n offdiag_template = scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)])\\n diag_template = np.diag(np.r_[0, np.ones(size - 2), 0])\\n I = np.eye(size)\\n\\n prec_matrix = (I - rho * offdiag_template + rho ** 2 * diag_template) / (sigma ** 2)\\n lowrank_matrix = norm.rvs(size=(size, rank))\\n return np.linalg.inv(prec_matrix) + lowrank_matrix.dot(lowrank_matrix.T)\\n\\n\\ndef gen_data(n_T, n_V, space_cov, time_cov):\\n\\n n_C = 16\\n U = np.zeros([n_C, n_C])\\n U = np.eye(n_C) * 0.6\\n U[8:12, 8:12] = 0.8\\n for cond in range(8, 12):\\n U[cond, cond] = 1\\n\\n beta = rmn(U, space_cov)\\n\\n X = rmn(np.eye(n_T), np.eye(n_C))\\n\\n Y_hat = X.dot(beta)\\n\\n Y = Y_hat + rmn(time_cov, space_cov)\\n\\n return beta, X, Y, U\\n\\n\\nn_T = 100\\nn_V = 80\\nn_C = 16\\n\\nspacecov_true = np.diag(np.abs(norm.rvs(size=(n_V))))\\ntimecov_true = make_ar1_with_lowrank_covmat(n_T, rank=7)\\n\\ntrue_beta, true_X, true_Y, true_U = gen_data(n_T, n_V, spacecov_true, timecov_true)\\n\\n%matplotlib inline\\nplt.matshow(true_U)\";\n", + " var nbb_formatted_code = \"%load_ext nb_black\\nimport scipy\\nfrom scipy.stats import norm\\nfrom scipy.special import expit as inv_logit\\nimport numpy as np\\nfrom numpy.linalg import cholesky\\nimport matplotlib.pyplot as plt\\n\\n\\ndef rmn(rowcov, colcov):\\n # generate random draws from a zero-mean matrix-normal distribution\\n Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0])))\\n return cholesky(rowcov).dot(Z).dot(cholesky(colcov))\\n\\n\\ndef make_ar1_with_lowrank_covmat(size, rank):\\n \\\"\\\"\\\" Generate a random covariance that is AR1 with added low rank structure\\n \\\"\\\"\\\"\\n sigma = np.abs(norm.rvs())\\n rho = np.random.uniform(-1, 0)\\n offdiag_template = scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)])\\n diag_template = np.diag(np.r_[0, np.ones(size - 2), 0])\\n I = np.eye(size)\\n\\n prec_matrix = (I - rho * offdiag_template + rho ** 2 * diag_template) / (sigma ** 2)\\n lowrank_matrix = norm.rvs(size=(size, rank))\\n return np.linalg.inv(prec_matrix) + lowrank_matrix.dot(lowrank_matrix.T)\\n\\n\\ndef gen_data(n_T, n_V, space_cov, time_cov):\\n\\n n_C = 16\\n U = np.zeros([n_C, n_C])\\n U = np.eye(n_C) * 0.6\\n U[8:12, 8:12] = 0.8\\n for cond in range(8, 12):\\n U[cond, cond] = 1\\n\\n beta = rmn(U, space_cov)\\n\\n X = rmn(np.eye(n_T), np.eye(n_C))\\n\\n Y_hat = X.dot(beta)\\n\\n Y = Y_hat + rmn(time_cov, space_cov)\\n\\n return beta, X, Y, U\\n\\n\\nn_T = 100\\nn_V = 80\\nn_C = 16\\n\\nspacecov_true = np.diag(np.abs(norm.rvs(size=(n_V))))\\ntimecov_true = make_ar1_with_lowrank_covmat(n_T, rank=7)\\n\\ntrue_beta, true_X, true_Y, true_U = 
gen_data(n_T, n_V, spacecov_true, timecov_true)\\n\\n%matplotlib inline\\nplt.matshow(true_U)\";\n", + " var nbb_cells = Jupyter.notebook.get_cells();\n", + " for (var i = 0; i < nbb_cells.length; ++i) {\n", + " if (nbb_cells[i].input_prompt_number == nbb_cell_id) {\n", + " if (nbb_cells[i].get_text() == nbb_unformatted_code) {\n", + " nbb_cells[i].set_text(nbb_formatted_code);\n", + " }\n", + " break;\n", + " }\n", + " }\n", + " }, 500);\n", + " " + ], + "text/plain": [ + "" ] }, "metadata": {}, @@ -132,6 +161,7 @@ } ], "source": [ + "%load_ext nb_black\n", "import scipy\n", "from scipy.stats import norm\n", "from scipy.special import expit as inv_logit\n", @@ -139,22 +169,23 @@ "from numpy.linalg import cholesky\n", "import matplotlib.pyplot as plt\n", "\n", + "\n", "def rmn(rowcov, colcov):\n", " # generate random draws from a zero-mean matrix-normal distribution\n", " Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0])))\n", - " return(cholesky(rowcov).dot(Z).dot(cholesky(colcov)))\n", + " return cholesky(rowcov).dot(Z).dot(cholesky(colcov))\n", "\n", "\n", "def make_ar1_with_lowrank_covmat(size, rank):\n", " \"\"\" Generate a random covariance that is AR1 with added low rank structure\n", " \"\"\"\n", " sigma = np.abs(norm.rvs())\n", - " rho = np.random.uniform(-1,0)\n", - " offdiag_template = scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size-2)])\n", - " diag_template = np.diag(np.r_[0,np.ones(size-2),0])\n", + " rho = np.random.uniform(-1, 0)\n", + " offdiag_template = scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)])\n", + " diag_template = np.diag(np.r_[0, np.ones(size - 2), 0])\n", " I = np.eye(size)\n", "\n", - " prec_matrix = (I - rho * offdiag_template + rho**2 * diag_template) / (sigma**2)\n", + " prec_matrix = (I - rho * offdiag_template + rho ** 2 * diag_template) / (sigma ** 2)\n", " lowrank_matrix = norm.rvs(size=(size, rank))\n", " return np.linalg.inv(prec_matrix) + lowrank_matrix.dot(lowrank_matrix.T)\n", "\n", @@ -166,7 +197,7 @@ " U = np.eye(n_C) * 0.6\n", " U[8:12, 8:12] = 0.8\n", " for cond in range(8, 12):\n", - " U[cond,cond] = 1\n", + " U[cond, cond] = 1\n", "\n", " beta = rmn(U, space_cov)\n", "\n", @@ -178,6 +209,7 @@ "\n", " return beta, X, Y, U\n", "\n", + "\n", "n_T = 100\n", "n_V = 80\n", "n_C = 16\n", @@ -201,23 +233,14 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Optimization terminated with:\n", - " Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'\n", - " Objective function value: 20082.401254\n", - " Number of iterations: 144\n", - " Number of functions evaluations: 163\n" - ] - }, { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 2, @@ -226,9 +249,38 @@ }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEgdJREFUeJzt3XuMnOV1x/Hf8ezFu/b6bjDYDmsc\nZBoMrR0XEUAEcGhcQICiqiIKkdPQgpo2AZKUgKwWRVWlSkSBqC2kFAgoIFBFSIKAABaBoiZAA8bG\nNnaDIQZf8YXaxnvx7uye/jHjp8t27d05M+87Y/h+pNVeZs6eZ2dnf/vO5Zlj7i4AkKRx9V4AgMZB\nIABICAQACYEAICEQACQEAoCk7oFgZsvM7L/NbJOZ3ZRxr7lm9pyZbTCz9WZ2XZb9hvQtmNlrZvZ4\nDr2mmNkjZrax/HN+JuN+N5Qvy3Vm9pCZja/x97/XzHaZ2bohX5tmZivN7M3y+6kZ97u1fHm+bmY/\nNbMpWfYbctq3zczNbEat+o2mroFgZgVJ/yLpjyV9StIXzexTGbYsSvqWu/+epLMk/VXG/Q67TtKG\nHPpI0g8kPeXup0r6/Sz7mtlsSd+QtMTdF0oqSLqyxm3uk7Rs2NdukvSsu58i6dny51n2Wylpobuf\nIem3km7OuJ/MbK6kiyS9W8Neo6r3EcKZkja5+9vu3ifpYUmXZ9XM3Xe4+6ryxx+o9McyO6t+kmRm\ncyRdIunuLPuUe02SdJ6keyTJ3fvcfV/GbZsktZlZk6R2Sdtr+c3d/QVJ7w/78uWS7i9/fL+kK7Ls\n5+7PuHux/OlLkuZk2a/sNkk3Ssr1mYP1DoTZkrYM+XyrMv4DPczMOiUtkvRyxq1uV+kXO5hxH0k6\nWdJuST8q30S528wmZNXM3bdJ+p5K/8V2SNrv7s9k1W+I4919R3kNOyQdl0PPw74q6RdZNjCzyyRt\nc/c1WfYZSb0DwUb4WuaJaGYTJf1E0vXufiDDPpdK2uXur2bVY5gmSYsl3enuiyR1qbaH0x9Svu1+\nuaR5kk6UNMHMrsqqX72Z2QqVbnY+mGGPdkkrJP1dVj2Opt6BsFXS3CGfz1GNDzmHM7NmlcLgQXd/\nNMteks6RdJmZbVbp5tCFZvZAhv22Strq7oePeh5RKSCy8jlJv3P33e7eL+lRSWdn2O+w98zsBEkq\nv9+VdUMzWy7pUklf8mw3AM1XKWDXlK83cyStMrNZGfZM6h0Iv5F0ipnNM7MWle6QeiyrZmZmKt2+\n3uDu38+qz2HufrO7z3H3TpV+tl+6e2b/Qd19p6QtZrag/KWlkt7Iqp9KNxXOMrP28mW7VPncefqY\npOXlj5dL+nmWzcxsmaTvSLrM3buz7OXua939OHfvLF9vtkpaXP7dZs/d6/om6WKV7rl9S9KKjHud\nq9JNktclrS6/XZzTz3m+pMdz6PMHkl4p/4w/kzQ1437flbRR0jpJP5bUWuPv/5BK90/0q/THcbWk\n6So9uvBm+f20jPttUum+rsPXmR9m2W/Y6Zslzcj6enP4zcpNAaDuNxkANBACAUBCIABICAQACYEA\nIGmYQDCza+hHv0br9XHoN1TDBIKkvC8E+h27/T7KP1s9+iWNFAgA6izXJybNmFbwzrnNI562e++A\nZk4vjHja+vdmhvqNKx75tGJvl5rGj7wRcLAp1G7krVqH+3V3qal95H4+8o9dlYGuLhUmjNyvqSf2\nPQvdR75A+wa61VJoH/nEweBGz8LIF0xfsVstTUfoJUnB63R/x8i/+GJPl5rajrxptNAX6+fjRr7C\n9B/qUnPrUfod7Ku4V8/AAfUN9BzlGloSveqHdM5t1n89PXf0Mw5z+m1fC/Vr2x37RfXMGPVyG9Hg\nyFk3qv6O2DotmOXT18YKp6zZG6qz7t5Q3eCUibF+vf2hup0XxP7xdGwdCNUV22MH6FN+vWX0Mw3z\n650Pjel83GQAkFQVCHm+HiKA7IUDoQ6vhwggY9UcIeT6eogAsldNINTt9RABZKOaQBjT6yGa2TVm\n9oqZvbJ7b+zeWAD5qCYQxvR6iO5+l7svcfclR3qeAYDGUE0g5Pp6iACyF35ikrsXzeyvJT2t0sSe\ne919fc1WBiB3VT1T0d2flPRkjdYCoM54piKAJNe9DOvfmxnal7D2hjtC/T57bWwXaev+2F6Gvo5Y\nvvYHh60VYlsE1N8W+/k2fyH2XP9pG2OPLu05I3Yn9ORNsb0aA8HLZf/JsT+jcf2xdbaeWvnMlsF9\nY9towxECgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAhEAAkBAIABICAUBCIABIct3tOK4Y\nm6YU3bX4H/96V6ju81d8OVQ3ac8HobreedNDdXtPaw3VNX8Q22XXtidW1zst9n/Hgi/BuWdRbJ2f\n/OaLobqB8xeH6rafMz5UN37HwYprxvWP7cLkCAFAQiAASAgEAEk1o9zmmtlzZrbBzNab2XW1XBiA\n/FVzp2JR0rfcfZWZdUh61cxWuvsbNVobgJyFjxDcfYe7ryp//IGkDWKUG3BMq8l9CGbWKWmRpJdr\n8f0A1EfVgWBmEyX9RNL17n5ghNPTbMdib1e17QBkqKpAMLNmlcLgQXd/dKTzDJ3t2DQ++HrjAHJR\nzaMMJukeSRvc/fu1WxKAeqnmCOEcSV+WdKGZrS6/XVyjdQGog2qGvf6npNioGwANiWcqAkhy3e04\n2CT1zKj8oCI6azG6a/Hpn/04VHf67ZXPrZSkCdsHQ3WFQ8Hdh9Njl2dxYqhMA62xdTb/v8esxqZ1\nb3B35adPC9U1/WpdqK5zdVuozjsDT/exsf3OOUIAkBAIABICAUBCIABICAQACYEAICEQACQEAoCE\nQACQEAgAEgIBQEIgAEgIBABJrrsdZdJgc+VlfR2x3IrOWozuWlx7/R2huj9c8ZehutZ9sV2EHvw3\nMPP1/mC/2O7Kpq5iqM4GY5fLtgsmh+qOn7QwVLfn9NhsxylvVf57GHx7bL90jhAAJAQCgIRAAJDU\nYi5DwcxeM7PHa7EgAPVTiyOE61Qa4wbgGFftoJY5ki6RdHdtlgOgnqo9Qrhd0o2SYq8SCqChVDO5\n6VJJu9z91VHO93+zHbuZ7Qg0smonN11mZpslPazSBKcHhp/pQ7Md25ntCDSycCC4+83uPsfdOyVd\nKemX7n5VzVYGIHc8DwFAUpO9DO7+vKTna/G9ANQPRwgAklx3O3pB6u+ofCdaf/C+yN5500N10VmL\n0V2Lv/mHO0N1F/3pV0J1k96K7SI8MD823LG5K3Z5HuiM7Qbs2HooVDd580CoruuEllDdhJ2N92g9\nRwgAEgIBQEIgAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACDJd7ajJAuM3Sv0\nxnrtPa01VFc4FJsNGJ21GN21uPLf7wvVLf772K7M4146
EKobmBjbDTjYHJsJGZ3t2DM99v9x1hPv\nhuo2XfuJUN38hyv/PYw7NLadnBwhAEgIBAAJgQAgqXZy0xQze8TMNprZBjP7TK0WBiB/1d6p+ANJ\nT7n7n5hZi6T2GqwJQJ2EA8HMJkk6T9JXJMnd+yT11WZZAOqhmpsMJ0vaLelH5XHwd5sZo5mAY1g1\ngdAkabGkO919kaQuSTcNP9PQ2Y4DXcx2BBpZNYGwVdJWd3+5/PkjKgXEhwyd7ViYwAEE0Miqme24\nU9IWM1tQ/tJSSW/UZFUA6qLaRxm+LunB8iMMb0v6s+qXBKBeqgoEd18taUmN1gKgznimIoAk192O\nTT3S9LWB2Y5tsV1vzR/Edr31To/182C8RmctRnctrvrb2CzJ8752Taiu/Z3Yo0v9J8V2qw42x+qa\nu4LXlwWzQnUz18RmO3bNm1xxzeCWwpjOxxECgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKA\nhEAAkBAIABICAUBCIABIct3tWOguasqavRXXbf7CzFC/tj2x3WvFiaEyzXy9P1R3YH6sYXTWYnTX\n4gt33BWqu2X3aaG6qO6B2CzJ9ed3xPqdu2D0M41g/N7Y9aVnZuU/n48b2w5ejhAAJAQCgIRAAJBU\nO9vxBjNbb2brzOwhMxtfq4UByF84EMxstqRvSFri7gslFSRdWauFAchftTcZmiS1mVmTSoNet1e/\nJAD1Us2glm2SvifpXUk7JO1392dqtTAA+avmJsNUSZdLmifpREkTzOyqEc6XZjv2DXTHVwogc9Xc\nZPicpN+5+25375f0qKSzh59p6GzHlkJ7Fe0AZK2aQHhX0llm1m5mptJsxw21WRaAeqjmPoSXVZr4\nvErS2vL3ij23FUBDqHa24y2SbqnRWgDUGc9UBJDkuttRg4Oy7t6Ky6ZtHAi1650Wy7uB1tguybHu\nKBuuuSs2429gYmxXX3TWYnTX4ndnrg/V/dP/nBSqmzk+tgt0/awLQ3Ut+/pCdTYYu54dPLHyJwQP\nNI/tfBwhAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAJN/djoWC\nBqdUPsdwzxmFUDuLbZJUc2yznJq6iqG6A52xcRaDzbHdlf0ntYbqoqK7Fr8+9Z1Q3a3vzw/VWVdP\nqO7AmbHZo637Y7tcW/ZXvkty3Bj/FjhCAJAQCAASAgFAMmogmNm9ZrbLzNYN+do0M1tpZm+W30/N\ndpkA8jCWI4T7JC0b9rWbJD3r7qdIerb8OYBj3KiB4O4vSHp/2Jcvl3R/+eP7JV1R43UBqIPofQjH\nu/sOSSq/P652SwJQL5nfqfih2Y5FZjsCjSwaCO+Z2QmSVH6/60hn/NBsxyZmOwKNLBoIj0laXv54\nuaSf12Y5AOppLA87PiTpRUkLzGyrmV0t6R8lXWRmb0q6qPw5gGPcqHsZ3P2LRzhpaY3XAqDOeKYi\ngCTf3Y7ust7+issmb4rNwNuzKFbXujeWk9FZfR1bD+Xab7A5ttuxeyA2SzI6azG6a/Fvpr0Vqnuu\nGOs3YXvl12lJUmyzqortlf/Z+hiv0hwhAEgIBAAJgQAgIRAAJAQCgIRAAJAQCAASAgFAQiAASAgE\nAAmBACAhEAAkBAKAJNfdjv0dTdp5QeVz8AbaYtvCPvnNF0N19unTQnXbLpgcqpu8OTaEsmd6LM+b\nu2K7JNef3xGrm3VhqC46azG6a/HJ154J1Z3/538Rqjs4O/bn1zu98prBMbbiCAFAQiAASAgEAEl0\ntuOtZrbRzF43s5+a2ZRslwkgD9HZjislLXT3MyT9VtLNNV4XgDoIzXZ092fcvVj+9CVJczJYG4Cc\n1eI+hK9K+sWRThw6yq3Y01WDdgCyUlUgmNkKSUVJDx7pPENHuTW1TaimHYCMhZ+YZGbLJV0qaam7\nx57pAqChhALBzJZJ+o6kz7o7I52Bj4jobMd/ltQhaaWZrTazH2a8TgA5iM52vCeDtQCoM56pCCDJ\ndbdjoc/VsbXynX37T44tc+D8xaG6pl+tG/1MIzh+0sJQXdcJsZmJs554N1TXu2BWqK773AWhupZ9\nfaG6A2dWvjNWis9ajO5afP7ufwvVLbvkS6G6tnkTK67ZNsaNoxwhAEgIBAAJgQAgIRAAJAQCgIRA\nAJAQCAASAgFAQiAASAgEAAmBACAhEAAkBAKAJNfdjj7OVGyvPIPG9cdeoW37OeNDdZ2r20J1e06P\n9ZuwczBUt+naT4TqZq6J9Ru/N7aL0AZjv7/W/bF1KjYKNDxrMbpr8aknjvhSpEe19KqrK66xgbH9\nDjhCAJAQCACS0Ci3Iad928zczGZkszwAeYqOcpOZzZV0kaTYy/YAaDihUW5lt0m6URIzGYCPiNB9\nCGZ2maRt7r6mxusBUEcVP85iZu2SVkj6ozGe/xpJ10hSS/vUStsByFHkCGG+pHmS1pjZZpUmP68y\nsxFfynfobMfmVmY7Ao2s4iMEd18r6bjDn5dDYYm776nhugDUQXSUG4CPoOgot6Gnd9ZsNQDqimcq\nAkgIBACJuef3vKLJLcf72bOOegtkRD2nxmYRjt9xMFTnzYVQXc/sfB9Fad98IFTXNW9yqC6yU1WS\nDp4Yq2vZH7tuFttj2x17p4fKNGNd5fNKJallXzFU9+wDlQ9fP/PzW/TKmt5RLxiOEAAkBAKAhEAA\nkBAIABICAUBCIABICAQACYEAICEQACQEAoCEQACQEAgAEgIBQJLrbkcz2y3pnSOcPENSni/DRr9j\nt99H+WfLqt9J7j5ztDPlGghHY2avuPsS+tGvkXp9HPoNxU0GAAmBACBppEC4i370a8BeH4d+ScPc\nhwCg/hrpCAFAnREIABICAUBCIABICAQAyf8CmWtrlo7vWjkAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAQoAAADxCAYAAAAz6fmnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAapklEQVR4nO3dfZAd1Xnn8e9vXoTQG0gIC4RkhLNYG0wIYK2DTdkhBjvEcUGqNslC/AIOu6rEWcc2OF4ILns3qXIpIbHNbhK8CjhgmyV2CE6omBhYbIpdF7C8GAOSwBCI5QEJAQYEEiNpZp79o3vwWJq595zp7jv3zvw+VV2al6Onz70jPXP69OnnKCIwM2ulb6Y7YGbdz4nCzNpyojCztpwozKwtJwoza8uJwszacqIw6yGSviRph6SHp/i+JP13SY9LelDSyXWc14nCrLdcDZzZ4vu/AhxbHuuBK+o4qROFWQ+JiDuAH7docjbw5SjcBRwq6ciq53WiMJtdjgJ+NOHzofJrlQxUDWBmrf3yLy2M5388mtT2vgf3bAKGJ3xpY0RsbKRjGZwozBr23I9HufvmVUltB4/8l+GIWFfhdE8Bqyd8vqr8WiW+9DBrXDAaY0lHDW4EPlje/TgFeCkitlUN6hGFWcMCGKOep7QlXQecBiyXNAR8BhgEiIgvAjcB7wEeB3YDH6rjvE4UZg0Lgn2RNkfRNlbEuW2+H8Dv1XKyCZwozDqgrhHFTJnROQpJZ0p6tFxFdnFNMVdL+o6kzZI2SfpoHXEnxO+X9D1J/1Rz3EMlXS/pEUlbJL21prgfL9+HhyVdJ2l+hVgHrAqUtEzSrZIeK/9cWlPcy8r34kFJ35B0aF19nvC9iySFpOXTiZ0qgFEi6ehWM5YoJPUDf0mxkuw44FxJx9UQegS4KCKOA04Bfq+muOM+CmypMd64y4FvRcS/BX6+jnNIOgr4fWBdRBwP9APnVAh5NQeuCrwYuC0ijgVuKz+vI+6twPERcQLwA+CSacSdKjaSVgPvBrZOM26WMSLp6FYzOaJ4C/B4RDwREXuBv6VYVVZJRGyLiPvLj1+m+A9XecEJgKRVwK8CV9YRb0LcQ4B3AFcBRMTeiHixpvADwMGSBoAFwNPTDTTFqsCzgWvKj68Bfq2OuBFxS0SMlJ/eRXGbL1uLlYyfBz4Jzf/vDGA0IunoVjOZKBpZQTaRpDXAScDdNYX8AsU/rlruY01wDPAs8DflZc2VkhZWDRoRTwF/RvFbcxvFrbJbqsbdz4oJt9+2Aytqjg/w28A/1xVM0tnAUxHx/bpitjOWeHSrWbuOQtIi4O+Bj0XEzhrivRfYERH3Ve7cgQaAk4ErIuIkYBfTG8L/lHK+4GyKRLQSWCjp/VXjTqWcca/116KkSykuJ6+tKd4C4A+BT9cRL0Ukzk94jmJyjawgA5A0SJEkro2IG+qICZwKnCXpXykuk94p6as1xR4ChiJifORzPUXiqOoM4MmIeDYi9gE3AG+rIe5Ez4w/dFT+uaOuwJLOB94LvC/qKxf/MxSJ8/vlz3IVcL+kI2qKf4AI2Jd4dKuZTBT3AMdKOkbSPIpJthurBpUkimv9LRHxuarxxkXEJRGxKiLWUPT12xFRy2/niNgO/EjS2vJLpwObawi9FThF0oLyfTmd+idibwTOKz8+D/jHOoJKOpPiMu+siNhdR0yAiHgoIl4XEWvKn+UQcHL5M2iIGE08utWMJYpyouo/AzdT/OP9ekRsqiH0qcAHKH7jP1Ae76khbtM+Alwr6UHgROCzVQOWI5TrgfuBhyh+3tN+wKhcFXgnsFbSkKQLgA3AuyQ9RjGC2VBT3L8AFgO3lj/DL9bY544KYCzSjm4lbwBk1qzjT5gXX//m4Ult3/T6p++r+FBYI7wy06xhxYKr7r2sSOFEYdYBY+FEYWYteERhZm0FYl/0z3Q3KumKBVeS1jtuc3GbjN1rcZuOPZnxEYVvj1bX1A/OcZuP3Wtxm449CTEafUlHt/Klh1nDigpX3ZsEUnQ0USxf1h9rVg8e8PXXHzXAup+f/1MLOh4bPiQ57vCeA2MC9B92KAetWXXAQpG+4Ywh3iRNB5Ys5eAjVx8Qdyzj3dQkBY8GDlnK/JUHxu3fmx53qkvhwcVLWbCi/j4PLlnKwUccGFcZTzhN1oeBJZO/FzlxASb7JT3V+6yMJUXD24eei4i0xRF4MjPLmtWD/L+bV7dvCLzn0fTFlFueWJnVj8Wb5yW3HZs8B01qz7L0f2mDO9P/4SwaSo+7b1HeP8jdK9Jjz3spPfbgrvS4w8vS4w4Mt28z0b6MZ3D79qW3feSzF/4wtW2EarusKJe2X05RW+TKiNiw3/dfT/G4/6Flm4sj4qaq563U+yYqVJnNRmMo6WglsdjTpygehziJ4pmkv6qj/9MeUUzo9LsoHqy5R9KNEVHHw0xms0Yg9kYtg/fXij0BSBov9jTx/1wAS8qPD6FCoaKJqvQ+pdNmc17mZOZySfdO+HziTmGTFXv6hf3+/n8FbpH0EWAhxYN6lVVJFCmdNjNgNH0J93MVHwo7F7g6Iv68LND8FUnHR1TbXajxycxycct6KO5umM01gRit5/ZoSrGnCyiLCUfEnWXV9eVULChUpfdJFaoiYmNErIuIdYcf1tvLWM2mayz6ko42Uoo9baUoUISknwXmU9RjraTKr/jXOk2RIM4Bfqtqh8xmm2IJd/URRUSMSBov9tQPfCkiNkn6I+DeiLgRuAj4a0kfL099fh1lBKedKKbqdNUOmc02dT4UVq6JuGm/r316wsebKaq81arSpMFknW7lseFDkhdS3bQ2fY3IL//SicltAX7439Lryy7amp6Moy994dDw4elzS0ueTG7KrszdL/YtT19lNLog/Z/L3ozVr/NeSm7KvBfzfjnm/EyaWjwZQVc/x5HCs4tmjWu/mKrbOVGYNazYKcwjCjNro6bbozPGicKsYYFcM9PM2vOIwsxamg01M50ozBpW7BTmEYWZteEKV2bWUoQ8ojCz9ryOIsPwnsHk+pY5y7JvfvqBrH4c+5W3JrfdeWx63OhLX1687KH0oejLR6e3Xbopb4lz/970fwI5hXiLK/M0r6xKf32vHJ3TB4icirkNKQrX+NLDzFqqr7juTHGiMGtYQM/fHp12mpO0WtJ3JG2WtEnSR+vsmNlsMb4yM+XoVlVGFCPARRFxv6TFwH2SbnUVbrMDzdmdwiJiG7Ct/PhlSVsoCu46UZhNUNSj6N7RQopa5igkrQFOAu6uI57ZbNPNlxUpKo+HJC0C/h74WETsnOT76yXdK+ne0Vd2VT2dWc8p5ihqKa6btDufpN+cMHf4v+p4DZVGFJIGKZLEtRFxw2Rtys1LNgKTbhhsNhfUsYQ7ZXc+SccClwCnRsQLkl5X+cRU21JQwFXAloj4XB2dMZuNAjEyVsvt0ZTd+f4T8JcR8QJARFTaz2NclUuPU4EPAO+U9EB5pG9BbjaHZGxSvHz8Ur081k8IM9nufEftd6o3Am+U9F1Jd5W7n1dW5a7H/6Wxus
Vms0fmXY+qWwoOAMcCp1FsynWHpJ+LiBcrxOzsysy+YbF487yktjkl9XOe3QB47ANXJLc9+Y9+N7ntiz+XXoJ/5xvSB3OHPZwe99kT83L3wHDGoDJjhkkZO10u2J4euG97elyAF9emtx1ZuTcveIaanh5N2Z1vCLg7IvYBT0r6AUXiuKfKiXt7FYhZD6hxZWbKloL/QDGaQNJyikuRJ6q+Bj/rYdYBdTw9mril4M3AuyVtBkaBP4iI56ue24nCrGFFKbx6pvMSthQM4MLyqI0ThVnTorbbozPGicKsYS5cY2ZJev1ZDycKs4bVOUcxU5wozDrAicLMWvLeo2bWXsCIi+tmEIwNpjVdtDV9WW9OSX3IW5Z9/6fTl3uf+rHfSW47vDS5KbtWpP8jG5ufsXYaiIxVyzm/FEfnp7fVSHrgkUV5lQqWbklvu3PPQVmxU3mOwsySOFGYWUuzYY6ijlJ4/ZK+J+mf6uiQ2WwUoaSjW9UxovgosAVYUkMss1mp11dmVhpRSFoF/CpwZT3dMZt9IpjTGwABfAH4JLB4qgZlKa/1AANLMqb6zWYNMTrW27dHq2wp+F5gR0Tc16pdRGyMiHURsW5gwcLpns6sp83lOYpTgbPKgrrzgSWSvhoR76+na2azw2xYRzHtEUVEXBIRqyJiDUVJrm87SZhNIop5ipSjW3kdhVkH9Ppdj1oSRUTcDtzert3YAOxZlpY2oy/9jY2+vFScUy07Z1n2d7/wxeS2b/i79LgHb09/L1bekfde7FmcHnvfwoyl1gvS+3DEPa8mt911RN4y61cPSx80L9jWzK/0gNrmH8p9Oi6nqJl5ZURsmKLdvweuB/5dRNxb9bweUZg1rp5bnylbCpbtFlOsb6pt0/Devmdj1iPGxpR0tPHaloIRsRcY31Jwf38M/AkwXFf/nSjMGlZMVCbfHq20paCkk4HVEfHNOl+DLz3MOiDj0mPaWwpK6gM+B5w/nb/fihOFWQfUdOuz3ZaCi4HjgdslARwB3CjprKoTmk4UZh1Q012P17YUpEgQ5wC/9ZNzxEvA8vHPJd0OfKKOux6eozBrWJA2P9EumUTECDC+peAW4OvjWwpKOqvJ1+ARhVkH1LVCo92Wgvt9/bSaTutEYda4gGh/67OrOVGYdUA3PxmaoqOJQqMwuDPtDRs+PH2Z9bKH8n4IO9+QPjWTUy07Z1n2E7+Rvtz7xA0fTm677dS892J0wWhy275X09+3vozK2s+8+eDktqmPAIzr35PeduFQVugs3fzAVwqPKMwaVuezHjPFicKsaUHepihdqGrNzEMlXS/pEUlbJL21ro6ZzSZzvR7F5cC3IuLXJc0DMh4uNptDujgJpJh2opB0CPAOynXl5dNsGRvUmc0V6vnbo1UuPY4BngX+ptwA6EpJB1TPlbR+/Em40d27KpzOrEflPT3alaokigHgZOCKiDgJ2AVcvH+jiVW4+12F2+aqSDy6VJVEMQQMRcR4FZ3rKRKHmR1AiUd3qlKFezvwI0lryy+dDmxu8VfM5q4eH1FUvevxEeDa8o7HE8CHqnfJbBbq4iSQolKiiIgHgORqPP17YdFQ2ju25Mn0frx8dN6Q7bCH05eH71qRPujKqZadsyz7gYv/Krntm/5HelyAhdsy3ruMf+x7ljYTd+TovDKQI8+nV+0endfQ0N8PhZlZkrk8ojCzRF186zOFE4VZB6jHRxQuhWfWtNQ7HgnJRNKZkh6V9LikA9YtSbpQ0mZJD0q6TdLRdbwEJwqzxqm49Eg5WkX5yU5hvwIcB5wr6bj9mn0PWBcRJ1CsbfrTOl6BE4VZJ9Qzomi7U1hEfCcidpef3kVR0r8yJwqzThhLPFpru1PYfi4A/nla/d2PJzPNmpZXuGa5pIn7cGyMiI25p5T0foo1Tr+Y+3cn40Rh1gEZdz1abSnYbqew4lzSGcClwC9GREbV0Kn50sOsE+qZo3htp7DysYlzgBsnNpB0EvA/gbMiYkdd3e/oiCL6Yd+itCHYrowpmKWb8m5SP3ti+uKXsfnpy71X3pHej5xq2TnLsjd9JH25N8DfvpxeZvzQ/t3tG5WGYzC57aeu/GBy275t85PbAix/IP1nsmNddy92iIgRSeM7hfUDXxrfKQy4NyJuBC4DFgF/V+4/ujUiKu8i5ksPsw6oa8FVu53CIuKMes7005wozDqhx5dwV63C/XFJmyQ9LOk6SXnjQrO5IKjr9uiMmXaikHQU8PsUq8COp7hmOqeujpnNJoq0o1tVvfQYAA6WtI+iVP/T1btkNgt1cRJIUaUU3lPAnwFbgW3ASxFxS10dM5tVerwUXpVLj6UU68yPAVYCC8vVYPu3e61c/8irLtdvc0/qZUc3X3pUmcw8A3gyIp6NiH3ADcDb9m80sVz/wMEu129zVA1Pj86kKoliK3CKpAUqVnacDmypp1tms0yPX3pMezIzIu6WdD1wPzBC8Rx89sMrZnOBuvjWZ4qqVbg/A3ympr6YzU5dPv+QoqMrM8cGYPeKtHds3/J9yXH79+a9jIHh9CuuyNh2ec/i9GvM0QWjyW1zSurnPLsBcM7iFxqJvXPs4Kx+pBrcmXcdHxn/Q/v3NDhH4ERhZm05UZhZO71+6eF6FGbWlkcUZp3Q4yMKJwqzpsUcvz1qZok8ojCzVoQnM80sRee2FDxI0tfK798taU0d3XeiMGtaTU+PJm4peAHwQkT8G+DzwJ/U8RKcKMw6oUNbCpafX1N+fD1wevnQZiUdnaPQKMx7Ka3PowvSuzaW+yoyrhdznvzdtzC9cd+rGTk6o785JfUhb1l2znLv7w4/n9x2MKNMySur8y72+0bS2x70QnNLuGu66zHZloK/MFWbsrz/S8BhwHNVTuzJTLNOSM9vtWwpWDcnCrOm5dWaqLql4HibIUkDwCFA+vBuCm3Hv5K+JGmHpIcnfG2ZpFslPVb+mffIotkcU1MpvLZbCpafn1d+/OvAtyOi8s3ZlAvlq4Ez9/vaxcBtEXEscFv5uZlNpYbJzIgYAca3FNwCfH18S0FJ49sGXgUcJulx4EJq+r/Z9tIjIu6Y5F7s2cBp5cfXALcD/6WODpnNRh3cUnAY+I16zvYT052jWBER28qPtwMrpmooaT2wHmBwia9QbI6a6yszy+ufKd+GiVW4+12F2+aguVyu/xlJRwKUf+6or0tms1CPV+GebqKYOLN6HvCP9XTHbHaa9SMKSdcBdwJrJQ1JugDYALxL0mMUGwFtaLabZj2ux0cUKXc9zp3iW6fnnkxjMLgr7d3YO5yznDbvHc5ZTjs6P73tyIL0tn0j6a9vz9L0tsMxmN4J8qpl5yzLPnV++mB13s70n19fenH2bP3DzcXu5iSQwiszzZrW5ZcVKZwozDrBicLM2nHNTDNry5ceZtZal9/RSOFEYdYJThRm1spsqMLtRGHWCU4UZtaOqteOmVFOFGZN85aCecYGYHhZ2nLkeS+lx31lVV715AXb07O7MpZaH3HPq8ltn3lz+tLpnGHrp678YHrjTDnVsnOWZd+94Yrktmv/T97r27M1vbTBviarI
PT2gMIjCrNO6PXJTG8AZNYJHXh6NKXotaQTJd0paZOkByX9h5TY063CfZmkR8oTfUPSoXkvyWwO6VyFq5Si17uBD0bEmyiKZn8h5f/vdKtw3wocHxEnAD8ALkmIYzZ3daYexcTtBK8Bfu2AbkT8ICIeKz9+mqI63eHtArdNFBFxB/Dj/b52S1k6HOAuio1IzGwS4wuuEkcUyyXdO+FYn3Gq5KLXAJLeAswD/qVd4DomM38b+FqLzrxWhXvAVbhtjtJY8nCh1U5hSPrfwBGTfOvSiZ9EREhTX8yUtW6/ApwXEW1v3lZKFJIuBUaAa6dqU+6buBFg/srMHWbNZoMaHwqLiDOm+p6kZyQdGRHbWhW9lrQE+CZwaUTclXLead/1kHQ+8F7gfXVsWWY2m2ks7aiobdHrcivCbwBfjojrUwNPK1FIOhP4JHBWROyeTgyzOaUzk5mTFr2WtE7SlWWb3wTeAZwv6YHyOLFd4LaXHmUV7tMoJlmGgM9Q3OU4CLhVEsBdEfE72S/LbI7oxIKriHieSYpeR8S9wH8sP/4q8NXc2NOtwn1V7omgGFoNJFY6nvdi+jv7ytF5/ejbnt52ZFF6P3YdcVBy2z3L0uOOHJ1eHrpvW0bZcGBwZ/oS9VcypphyqmXnLMt+9O1fTg8MrH38d9Mbp0845gmgx6/OvYTbrAP8UJiZteTCNWbWXoQvPcysPY8ozKw9Jwoza8cjCjNrLWju1muHOFGYdYBvj5pZe77rYWbteI7CzFrz3qN5oi+9JHr0pT+DEJnp+sW16W2Xbklv++ph6Q/j9u9JjzvyfPozJMsfyHsvct67vpH2baYjp6R+1rMbwKMfSt8K4JQ/aOa5xmJlZm9nCo8ozDqhxyczp1WFe8L3LpIUkpY30z2z2UERSUe3mm4VbiStBt4NbK25T2azS0SxjiLl6FLTqsJd+jxFlavufXVmXaJD+3o0ZlpzFJLOBp6KiO+XFa7MrJUuvqxIkV0zU9IC4A+BTye2Xz++R8Ho7oxdbs1mi+hMcd2ULQUntF0iaUjSX6TEnk5x3Z8BjgG+L+lfKTb/uV/SZHsNEBEbI2JdRKzrX9DkdtFmXWy8JkW7o5qULQXH/TFwR2rg7EQREQ9FxOsiYk1ErAGGgJMjIqMSpdkc0yVbCgJIejPFLmK3pAZOuT16HXAnsLYcqlyQGtzMChm3RxvdUlBSH/DnwCdy+j/dKtwTv78m54Rmc04Ao12zpeCHgZsiYijnRkRHV2YqMsq4N3gzZWTl3uS2O/ekL59esC197LhwKLkpo/PS34wd6/LGr/170mMf9EJ62/70HQaSl/UD2WsNcpZl33XZF5Pb9mfsjCHqW0xVw5aCbwXeLunDwCJgnqRXIqLVfIaXcJt1RGduj45vKbiBKbYUjIj3jX9cbgu6rl2SgAp7j5pZhs7c9UjZUnBaPKIwa1rQkYfCUrYU3O/rV1M8otGWE4VZB3TzA18pnCjMOsGJwsxaioCx3i5I4URh1gm9nSecKMw6wXMUZtaeE4WZteSdwvIMbx967pHPXvjDSb61HHiugVM6bvOxey3ulLFzlmUDR6c3rWUx1YzqbLn+iMMn+7qke1s9CDNdjtt87F6L23TsKTlRmFlLAYz29m0PJwqzxgWEE0UdNjpuo3GbjN1rcZuOPbkev/RQ9PgLMOt2h8xbEW87omX9p9d860eX39fx+ZME3TKiMJvdevwXshOFWSc4UZhZSxEwOjrTvajEicKsEzyiMLO2ejxRuGamWeM6s5t56paCkl4v6RZJWyRtlrSmXWwnCrOmBUSMJR0VpW4p+GXgsoj4WeAtTF7W/6c4UZh1QgdGFCRsKSjpOGAgIm4FiIhXImJ3u8BOFGadkF6uv9EtBYE3Ai9KukHS9yRdJqm/XWBPZpo1Le/2aNNbCg4AbwdOArYCXwPOB65q1SknCrMOiJqK69awpeAQ8EBEPFH+nX8ATqFNovClh1njEi87qt9CHd9SEKbYUhC4BzhU0nhtmHcCm9sFdqIwa9p4KbzmJzPbbikYEaPAJ4DbJD1EsR34X7cL7EsPs07oQD2K1C0FyzseJ+TEdqIwa1gA4eK6ZtZSuMKVmSWIHn961BWuzBom6VsUWwSkeC4izmyyP9PhRGFmbfn2qJm15URhZm05UZhZW04UZtaWE4WZtfX/AbkRPdHlTKXmAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "application/javascript": [ + "\n", + " setTimeout(function() {\n", + " var nbb_cell_id = 2;\n", + " var nbb_unformatted_code = \"import tensorflow as tf\\nfrom brainiak.matnormal.covs import CovDiagonal, CovAR1, CovUnconstrainedCholesky\\nfrom brainiak.utils.utils import cov2corr\\nfrom brainiak.matnormal.utils import (\\n make_val_and_grad,\\n pack_trainable_vars,\\n unpack_trainable_vars,\\n unflatten_cholesky_unique,\\n)\\nfrom brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row\\nfrom scipy.optimize import minimize\\n\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nrsa_cov = CovUnconstrainedCholesky(size=n_C)\\n\\nparams = (\\n rsa_cov.get_optimize_vars()\\n + time_cov.get_optimize_vars()\\n + space_cov.get_optimize_vars()\\n)\\n\\n# construct loss (marginal likelihood constructed automatically)\\n# note that params are ignored by this function but implicitly\\n# tracked by tf.GradientTape, and the remaining inputs are\\n# embedded via the closure mechanism\\ndef loss(params):\\n return -(\\n time_cov.logp\\n + space_cov.logp\\n + rsa_cov.logp\\n + matnorm_logp_marginal_row(\\n true_Y, row_cov=time_cov, col_cov=space_cov, marg=true_X, marg_cov=rsa_cov\\n )\\n )\\n\\n\\nval_and_grad = make_val_and_grad(lossfn=loss, train_vars=params)\\n\\nx0 = pack_trainable_vars(params)\\n\\nopt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method=\\\"L-BFGS-B\\\")\\n\\nfit_params = unpack_trainable_vars(opt_results.x, params)\\n\\nfor var, val in zip(params, fit_params):\\n var.assign(val)\\n\\nU = rsa_cov._cov.numpy()\\nC = cov2corr(U)\\nplt.matshow(C)\\nplt.colorbar()\";\n", + " var nbb_formatted_code = \"import tensorflow as tf\\nfrom brainiak.matnormal.covs import CovDiagonal, CovAR1, CovUnconstrainedCholesky\\nfrom brainiak.utils.utils import cov2corr\\nfrom brainiak.matnormal.utils import (\\n make_val_and_grad,\\n pack_trainable_vars,\\n unpack_trainable_vars,\\n unflatten_cholesky_unique,\\n)\\nfrom brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row\\nfrom scipy.optimize import minimize\\n\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nrsa_cov = CovUnconstrainedCholesky(size=n_C)\\n\\nparams = (\\n rsa_cov.get_optimize_vars()\\n + time_cov.get_optimize_vars()\\n + space_cov.get_optimize_vars()\\n)\\n\\n# construct loss (marginal likelihood constructed automatically)\\n# note that params are ignored by this function but implicitly\\n# tracked by tf.GradientTape, and the remaining inputs are\\n# embedded via the closure mechanism\\ndef loss(params):\\n return -(\\n time_cov.logp\\n + space_cov.logp\\n + rsa_cov.logp\\n + matnorm_logp_marginal_row(\\n true_Y, row_cov=time_cov, col_cov=space_cov, marg=true_X, marg_cov=rsa_cov\\n )\\n )\\n\\n\\nval_and_grad = make_val_and_grad(lossfn=loss, train_vars=params)\\n\\nx0 = pack_trainable_vars(params)\\n\\nopt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method=\\\"L-BFGS-B\\\")\\n\\nfit_params = unpack_trainable_vars(opt_results.x, params)\\n\\nfor var, val in zip(params, fit_params):\\n var.assign(val)\\n\\nU = rsa_cov._cov.numpy()\\nC = cov2corr(U)\\nplt.matshow(C)\\nplt.colorbar()\";\n", + " var nbb_cells = Jupyter.notebook.get_cells();\n", + " for (var i = 0; i < nbb_cells.length; ++i) {\n", + " if (nbb_cells[i].input_prompt_number == nbb_cell_id) {\n", + " if (nbb_cells[i].get_text() == nbb_unformatted_code) {\n", + " 
nbb_cells[i].set_text(nbb_formatted_code);\n", + " }\n", + " break;\n", + " }\n", + " }\n", + " }, 500);\n", + " " + ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -237,100 +289,128 @@ ], "source": [ "import tensorflow as tf\n", - "from brainiak.matnormal.covs import (CovDiagonal, CovAR1,\n", - " CovUnconstrainedInvCholesky)\n", + "from brainiak.matnormal.covs import CovDiagonal, CovAR1, CovUnconstrainedCholesky\n", "from brainiak.utils.utils import cov2corr\n", + "from brainiak.matnormal.utils import (\n", + " make_val_and_grad,\n", + " pack_trainable_vars,\n", + " unpack_trainable_vars,\n", + " unflatten_cholesky_unique,\n", + ")\n", "from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row\n", - "from tensorflow.contrib.opt import ScipyOptimizerInterface\n", + "from scipy.optimize import minimize\n", "\n", "space_cov = CovDiagonal(size=n_V)\n", "time_cov = CovAR1(size=n_T)\n", - "rsa_cov = CovUnconstrainedInvCholesky(size=n_C)\n", "\n", - "# inputs into TF\n", - "X = tf.constant(true_X)\n", - "Y = tf.constant(true_Y)\n", + "rsa_cov = CovUnconstrainedCholesky(size=n_C)\n", "\n", - "params = rsa_cov.get_optimize_vars() +\\\n", - " time_cov.get_optimize_vars() +\\\n", - " space_cov.get_optimize_vars()\n", + "params = (\n", + " rsa_cov.get_optimize_vars()\n", + " + time_cov.get_optimize_vars()\n", + " + space_cov.get_optimize_vars()\n", + ")\n", "\n", - "# tf session\n", - "sess = tf.Session()\n", + "# construct loss (marginal likelihood constructed automatically)\n", + "# note that params are ignored by this function but implicitly\n", + "# tracked by tf.GradientTape, and the remaining inputs are\n", + "# embedded via the closure mechanism\n", + "def loss(params):\n", + " return -(\n", + " time_cov.logp\n", + " + space_cov.logp\n", + " + rsa_cov.logp\n", + " + matnorm_logp_marginal_row(\n", + " true_Y, row_cov=time_cov, col_cov=space_cov, marg=true_X, marg_cov=rsa_cov\n", + " )\n", + " )\n", "\n", - "# initialize\n", - "sess.run(tf.global_variables_initializer())\n", "\n", - "# construct loss (marginal likelihood constructed automatically)\n", - "loss = -(time_cov.logp +\n", - " space_cov.logp +\n", - " rsa_cov.logp +\n", - " matnorm_logp_marginal_row(Y,\n", - " row_cov=time_cov,\n", - " col_cov=space_cov,\n", - " marg=X, marg_cov=rsa_cov))\n", + "val_and_grad = make_val_and_grad(lossfn=loss, train_vars=params)\n", + "\n", + "x0 = pack_trainable_vars(params)\n", "\n", - "# tie into scipy's optimizers\n", - "optimizer = ScipyOptimizerInterface(loss,\n", - " var_list=params,\n", - " method=\"L-BFGS-B\")\n", + "opt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method=\"L-BFGS-B\")\n", "\n", + "fit_params = unpack_trainable_vars(opt_results.x, params)\n", "\n", - "optimizer.minimize(sess)\n", + "for var, val in zip(params, fit_params):\n", + " var.assign(val)\n", "\n", - "U = rsa_cov.Sigma.eval(session=sess)\n", + "U = rsa_cov._cov.numpy()\n", "C = cov2corr(U)\n", - "plt.matshow(C)" + "plt.matshow(C)\n", + "plt.colorbar()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In practice, MN-RSA is actually implemented in `brainiak.matnormal`, including the nuisance regressor estimation of Cai et al. " + "In practice, MN-RSA is already implemented in brainiak.matnormal, including the nuisance regressor estimation of Cai et al." 
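The updated cell above replaces the TF1-era ScipyOptimizerInterface with a hand-rolled bridge between TensorFlow 2 and scipy.optimize.minimize. As a rough illustration of that pattern only (the helper name make_val_and_grad_sketch and the toy least-squares problem are assumptions for this sketch, not brainiak's actual make_val_and_grad), the idea is to flatten the trainable variables into one vector, evaluate the loss closure under tf.GradientTape, and hand the value and flattened gradient back to L-BFGS-B:

import numpy as np
import tensorflow as tf
from scipy.optimize import minimize

def make_val_and_grad_sketch(lossfn, train_vars):
    # Hypothetical helper (not the brainiak implementation): adapts a TF2 loss
    # closure to the (value, gradient) callable that scipy's L-BFGS-B expects.
    def val_and_grad(x):
        # scatter the flat parameter vector back into the tf.Variables
        offset = 0
        for v in train_vars:
            size = int(np.prod(v.shape.as_list()))
            v.assign(x[offset:offset + size].reshape(v.shape.as_list()))
            offset += size
        # the variables are tracked implicitly: GradientTape watches tf.Variables
        with tf.GradientTape() as tape:
            loss = lossfn(train_vars)
        grads = tape.gradient(loss, train_vars)
        flat_grad = np.concatenate([g.numpy().ravel() for g in grads])
        return float(loss.numpy()), flat_grad.astype(np.float64)
    return val_and_grad

# toy usage: least squares in b, with scipy driving TensorFlow gradients
A = tf.constant(np.random.randn(20, 3))
y = tf.constant(np.random.randn(20, 1))
b = tf.Variable(np.zeros((3, 1)))

def loss(params):  # params ignored, as in the notebook cell above
    return tf.reduce_sum(tf.square(tf.matmul(A, b) - y))

val_and_grad = make_val_and_grad_sketch(loss, [b])
res = minimize(fun=val_and_grad, x0=np.zeros(3), jac=True, method="L-BFGS-B")
print(res.x)

In the notebook cell itself, pack_trainable_vars and unpack_trainable_vars play this flatten/scatter role around the call to minimize.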
] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Optimization terminated with:\n", - " Message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'\n", - " Objective function value: 17691.188228\n", - " Number of iterations: 8723\n", - " Number of functions evaluations: 9173\n" - ] - }, { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 5, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQQAAAECCAYAAAAYUakXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEftJREFUeJzt3X1s3fV1x/HPsa8d23mObWiwozl0\nNIyibKDQptB1XSksAxT4YxK0Zc2ALerWjYBaUVCkof4xqVKrQqd1MMZD0YjoJEpbxCglokBXbc0I\ngYSEAAltIIFATBAJtmtfP5z9cW++MpYd+557f7/rwPslRfHDPT4/X9sf/+71PfeYuwsAJKmh3gcA\nYPYgEAAkBAKAhEAAkBAIABICAUBS90AwszVm9pKZ7TWzGzPutczMnjCz3Wa2y8w2ZNlvXN9GM3vW\nzB7OodciM3vAzF4sf56fyrjf9eXrcqeZ3W9mLTX++Heb2SEz2znubUvMbLOZ7Sn/vzjjft8uX587\nzOzHZrYoy37j3vd1M3Mz66hVv+nUNRDMrFHS9yX9uaQzJH3BzM7IsOWIpK+5+x9IWi3pqxn3O2aD\npN059JGk70l61N1Pl/SHWfY1sy5J10pa5e5nSmqUdEWN2/xA0poJb7tR0uPufpqkx8uvZ9lvs6Qz\n3X2lpJcl3ZRxP5nZMkkXSHqthr2mVe8zhE9I2uvuv3H3oqQfSro0q2buftDdt5Vffk+lH5aurPpJ\nkpl1S7pY0p1Z9in3WiDpM5LukiR3L7r7uxm3LUhqNbOCpDZJb9Tyg7v7LyW9M+HNl0q6t/zyvZIu\ny7Kfuz/m7iPlV38tqTvLfmW3SLpBUq6PHKx3IHRJ2j/u9QPK+Af0GDPrkXSWpC0Zt7pVpS/sWMZ9\nJOlUSb2S7infRLnTzOZm1czdX5f0HZV+ix2UdMTdH8uq3zgnu/vB8jEclHRSDj2PuVrSz7JsYGZr\nJb3u7tuz7DOZegeCTfK2zBPRzOZJ+pGk69z9aIZ9LpF0yN2fyarHBAVJZ0u6zd3PktSv2p5Ov0/5\ntvulkpZLOkXSXDO7Mqt+9WZmG1W62bkpwx5tkjZK+sesehxPvQPhgKRl417vVo1POScysyaVwmCT\nuz+YZS9J50laa2b7VLo59Dkzuy/DfgckHXD3Y2c9D6gUEFn5vKTfunuvuw9LelDSuRn2O+YtM1sq\nSeX/D2Xd0MzWSbpE0pc82wGgj6oUsNvL3zfdkraZ2Ucy7JnUOxCelnSamS03s2aV7pB6KKtmZmYq\n3b7e7e7fzarPMe5+k7t3u3uPSp/bL9w9s9+g7v6mpP1mtqL8pvMlvZBVP5VuKqw2s7bydXu+8rnz\n9CFJ68ovr5P00yybmdkaSd+QtNbdB7Ls5e7Pu/tJ7t5T/r45IOns8tc2e+5e13+SLlLpnttXJG3M\nuNenVbpJskPSc+V/F+X0eX5W0sM59PkjSVvLn+NPJC3OuN83Jb0oaaek/5A0p8Yf/36V7p8YVumH\n4xpJ7Sr9dWFP+f8lGffbq9J9Xce+Z27Pst+E9++T1JH1982xf1ZuCgB1v8kAYBYhEAAkBAKAhEAA\nkBAIAJJZEwhmtp5+9JttvT4M/cabNYEgKe8rgX4nbr8P8udWj37JbAoEAHWW6wOTOpY0es+ypknf\n13t4VJ3tjZO+b9ehzlC/xsGpP7eRoX4V5kw+CDjaMtnM1fT8OGWjA/1qbJti8DAYy8f7yo3196th\n7uT9CoOxfoWB0SnfVxwZUHOhbfJ3Rr/HbPIrtDjSr+bCcYY4g/0G2wuTvv1416UkNffF+o0VJv/8\nRgb7VWiZul/jYOWDs4ND76o43D/tN/bk10BGepY16f9+vmz6C05w5j//Xajfkt0j019oEu+cHrta\nxibPummNtga/oSbPz2kteilW1/HskVCdFWNfB2+IJaUNx/rtuTr2i6frqVi/300RQNNZtKe/4pot\nO26f0eW4yQAgqSoQ8nw+RADZCwdCHZ4PEUDGqjlDyPX5EAFkr5pAqNvzIQLIRjWBMKPnQzSz9Wa2\n1cy29h6e+s9WAOqvmkCY0fMhuvsd7r7K3VdN9TgDALNDNYGQ6/MhAshe+IFJ7j5iZn8v6ecqbey5\n29131ezIAOSuqkcquvsjkh6p0bEAqDMeqQggyXWWYdehztBcws5r/zXU74LLrwrVzX0jdudnYzE4\nVLM4NkzVEPyjzUhrrN/eLy4M1S0Obobo644d57z9sa+DBWew+rpiP0bDbbHPr7+7teKasRdn9ruf\nMwQACYEAICEQACQEAoCEQACQEAgAEgIBQEIgAEgIBAAJgQAgIRAAJAQCgIRAAJDkOu3YOOihbUrR\nqcXN/3lPqO7iT18WqhtbMMUqs2kUeuaF6gY6YlOZ0dVjC14JlWlwSWyqb/Dk2Djn4MeGQ3WnfXlb\nqK645pxQ3asXx34fn/RcseKahuGZrX/jDAFAQiAASAgEAEk1q9yWmdkTZrbbzHaZ2YZaHhiA/FVz\np+KIpK+5+zYzmy/pGTPb7O7BJ8wCUG/hMwR3P+ju28ovvydpt1jlBpzQanIfgpn1SDpL0pZafDwA\n9VF1IJjZPEk/knSdux+d5P1pt+PIUH+17QBkqKpAMLMmlcJgk7s/ONllxu92LMyZW007ABmr5q8M\nJukuSbvd/bu1OyQA9VLNGcJ5kv5S0ufM7Lnyv4tqdFwA6qCaZa+/khR7kDqAWYlHKgJIcp12HG0x\nvXN65S2juxajU4v/9aufhOp+f9Pfhurm74udaDX1x6YWhxYGpw87Y/1aekNl6tga+33VMNocqnvv\n8tWhusVPvxmqW/FkrM6WL6u8ZoRpRwAVIhAAJAQCgIRAAJAQCAASAgFAQiAASAgEAAmBACAhEA
[...remainder of base64 PNG payloads omitted: the removed and re-added renders of the two cell figures, plt.matshow(model_matnorm.C_) and plt.matshow(naive_RSA)]",
[nb_black application/javascript output omitted: it duplicates this cell's source; its embedded copy resumes with:]
model_linreg.coef_\\nnaive_RSA = np.corrcoef(beta_series.T)\\n\\n# MN-RSA\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nmodel_matnorm = MNRSA(time_cov=time_cov, space_cov=space_cov, n_nureg=3)\\n\\nmodel_matnorm.fit(true_Y, true_X)\\n\\n# very similar on this toy data but as we show in the paper can be very different\\n# in other examples\\nplt.matshow(model_matnorm.C_)\\nplt.matshow(naive_RSA)\";\n", + " var nbb_formatted_code = \"from brainiak.matnormal.mnrsa import MNRSA\\nfrom brainiak.matnormal.covs import CovIdentity\\nfrom sklearn.linear_model import LinearRegression\\n\\n# beta_series RSA\\nmodel_linreg = LinearRegression(fit_intercept=False)\\nmodel_linreg.fit(true_X, true_Y)\\nbeta_series = model_linreg.coef_\\nnaive_RSA = np.corrcoef(beta_series.T)\\n\\n# MN-RSA\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nmodel_matnorm = MNRSA(time_cov=time_cov, space_cov=space_cov, n_nureg=3)\\n\\nmodel_matnorm.fit(true_Y, true_X)\\n\\n# very similar on this toy data but as we show in the paper can be very different\\n# in other examples\\nplt.matshow(model_matnorm.C_)\\nplt.matshow(naive_RSA)\";\n", + " var nbb_cells = Jupyter.notebook.get_cells();\n", + " for (var i = 0; i < nbb_cells.length; ++i) {\n", + " if (nbb_cells[i].input_prompt_number == nbb_cell_id) {\n", + " if (nbb_cells[i].get_text() == nbb_unformatted_code) {\n", + " nbb_cells[i].set_text(nbb_formatted_code);\n", + " }\n", + " break;\n", + " }\n", + " }\n", + " }, 500);\n", + " " + ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -338,7 +418,8 @@ } ], "source": [ - "from brainiak.matnormal import MNRSA\n", + "from brainiak.matnormal.mnrsa import MNRSA\n", + "from brainiak.matnormal.covs import CovIdentity\n", "from sklearn.linear_model import LinearRegression\n", "\n", "# beta_series RSA\n", @@ -348,12 +429,15 @@ "naive_RSA = np.corrcoef(beta_series.T)\n", "\n", "# MN-RSA\n", - "model_matnorm = MNRSA(time_cov=time_cov,\n", - " space_cov=space_cov, n_nureg=3)\n", + "space_cov = CovDiagonal(size=n_V)\n", + "time_cov = CovAR1(size=n_T)\n", + "\n", + "model_matnorm = MNRSA(time_cov=time_cov, space_cov=space_cov, n_nureg=3)\n", "\n", "model_matnorm.fit(true_Y, true_X)\n", "\n", - "# very similar on this toy data but in real settings may be more different. 
\n", + "# very similar on this toy data but as we show in the paper can be very different\n", + "# in other examples\n", "plt.matshow(model_matnorm.C_)\n", "plt.matshow(naive_RSA)" ] @@ -362,7 +446,7 @@ "metadata": { "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python [conda env:brainiak]", + "display_name": "Python [conda env:brainiak] *", "language": "python", "name": "conda-env-brainiak-py" }, @@ -376,7 +460,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.6.8" } }, "nbformat": 4, From a1fa69e86b5c256f1e84dae0664c1df84c073c3c Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:24:29 -0700 Subject: [PATCH 50/84] final typo fixes --- brainiak/matnormal/covs.py | 5 ++--- brainiak/matnormal/matnormal_likelihoods.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 8fe2aa056..3f35c4d3b 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -172,11 +172,10 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): if rho is None: self.rho_unc = tf.Variable( - tf.random.normal([1], dtype=tf.float64), name="rho" + tf.random.normal([1], dtype=tf.float64), name="rho_unc" ) else: - self.rho_unc = tf.Variable( - 2 * tf.sigmoid(self.rho_unc) - 1, name="rho") + self.rho_unc = tf.Variable(scipy.special.logit(rho / 2 + 0.5), name='rho_unc') @property def logdet(self): diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 22b921bf4..67ed131dc 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -273,7 +273,7 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): .. math:: X \\sim \\mathcal{MN}(0, R, Q)\\ Y \\mid \\X \\sim \\mathcal{MN}(XA, R, C),\\ - Y \\sim \\mathcal{MN}(0, R, C + AQA) + Y \\sim \\mathcal{MN}(0, R, C + A'QA) This function efficiently computes the marginals by unpacking some info in the covariance classes and then dispatching to solve_det_marginal. 
From ee572832f6eac5586e65d34a06366496f8fe4799 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:31:02 -0700 Subject: [PATCH 51/84] autoformat --- brainiak/matnormal/mnrsa.py | 9 +- brainiak/utils/kronecker_solvers.py | 141 +++++++++--------- tests/matnormal/test_matnormal_logp.py | 7 +- .../test_matnormal_logp_conditional.py | 14 +- .../matnormal/test_matnormal_logp_marginal.py | 9 +- tests/matnormal/test_matnormal_regression.py | 5 +- tests/matnormal/test_matnormal_utils.py | 6 +- 7 files changed, 90 insertions(+), 101 deletions(-) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index 68e90ab4b..d3e6d4354 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -117,7 +117,7 @@ def fit(self, X, y, naive_init=True): self.n_c = X.shape[1] - if naive_init: + if naive_init: # initialize from naive RSA m = LinearRegression(fit_intercept=False) m.fit(X=X, y=Y) @@ -126,11 +126,12 @@ def fit(self, X, y, naive_init=True): self.L_flat = tf.Variable( flatten_cholesky_unique(naiveRSA_L), name="L_flat", dtype="float64" ) - else: - + else: chol_flat_size = (self.n_c * (self.n_c + 1)) // 2 self.L_flat = tf.Variable( - tf.random.normal([chol_flat_size], dtype="float64"), name="L_flat", dtype="float64" + tf.random.normal([chol_flat_size], dtype="float64"), + name="L_flat", + dtype="float64", ) self.train_variables.extend([self.L_flat]) diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py index 99d5d2d01..d39be648d 100644 --- a/brainiak/utils/kronecker_solvers.py +++ b/brainiak/utils/kronecker_solvers.py @@ -1,9 +1,6 @@ import tensorflow as tf -__all__ = [ - "tf_kron_mult", - "tf_masked_triangular_solve", -] +__all__ = ["tf_kron_mult", "tf_masked_triangular_solve"] def tf_solve_lower_triangular_kron(L, y): @@ -32,21 +29,24 @@ def tf_solve_lower_triangular_kron(L, y): else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_list = tf.stack( + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L] + ) n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) - nb = tf.cast(n_prod/na, dtype=tf.int32) + nb = tf.cast(n_prod / na, dtype=tf.int32) col = tf.shape(input=x)[1] for i in range(na): - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0) t = xinb / L[0][i, i] xinb = tf_solve_lower_triangular_kron(L[1:], t) xina = xina - tf.reshape( - tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), [1, nb*col]), - [(na-i-1)*nb, col]) * \ - tf.reshape( - tf.tile(tf.reshape(t, [-1, 1]), [na-i-1, 1]), - [(na-i-1)*nb, col]) + tf.tile(tf.slice(L[0], [i + 1, i], [na - i - 1, 1]), [1, nb * col]), + [(na - i - 1) * nb, col], + ) * tf.reshape( + tf.tile(tf.reshape(t, [-1, 1]), [na - i - 1, 1]), + [(na - i - 1) * nb, col], + ) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x @@ -78,22 +78,21 @@ def tf_solve_upper_triangular_kron(L, y): else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_list = tf.stack( + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L] + ) n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) - nb = tf.cast(n_prod/na, dtype=tf.int32) + nb = tf.cast(n_prod / na, dtype=tf.int32) col = tf.shape(input=x)[1] - for i in range(na-1, -1, -1): - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + for i in range(na - 1, -1, 
-1): + xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0) t = xinb / L[0][i, i] xinb = tf_solve_upper_triangular_kron(L[1:], t) - xt = (xt - - tf.reshape( - tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), - [1, nb*col]), - [i*nb, col]) - * tf.reshape( - tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), [i*nb, col])) + xt = xt - tf.reshape( + tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), [1, nb * col]), + [i * nb, col], + ) * tf.reshape(tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), [i * nb, col]) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x @@ -122,21 +121,25 @@ def tf_kron_mult(L, x): return tf.matmul(L[0], x) else: na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_list = tf.stack( + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L] + ) n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) - nb = tf.cast(n_prod/na, dtype=tf.int32) + nb = tf.cast(n_prod / na, dtype=tf.int32) col = tf.shape(input=x)[1] xt = tf_kron_mult( - L[1:], - tf.transpose(a=tf.reshape(tf.transpose(a=x), [-1, nb]))) + L[1:], tf.transpose(a=tf.reshape(tf.transpose(a=x), [-1, nb])) + ) y = tf.zeros_like(x) for i in range(na): - ya, yb, yc = tf.split(y, [i*nb, nb, (na-i-1)*nb], 0) - yb = tf.reshape(tf.matmul(tf.reshape(xt, [nb*col, na]), - tf.transpose(a=tf.slice(L[0], - [i, 0], - [1, na]))), - [nb, col]) + ya, yb, yc = tf.split(y, [i * nb, nb, (na - i - 1) * nb], 0) + yb = tf.reshape( + tf.matmul( + tf.reshape(xt, [nb * col, na]), + tf.transpose(a=tf.slice(L[0], [i, 0], [1, na])), + ), + [nb, col], + ) y = tf.concat(axis=0, values=[ya, yb, yc]) return y @@ -171,17 +174,20 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): """ zero = tf.constant(0, dtype=tf.int32) - mask_mat = tf.compat.v1.where(tf.not_equal(tf.matmul(tf.reshape(mask, [-1, 1]), - tf.reshape(mask, [1, -1])), - zero)) - q = tf.cast(tf.sqrt(tf.cast(tf.shape(input=mask_mat)[0], dtype=tf.float64)), dtype=tf.int32) + mask_mat = tf.compat.v1.where( + tf.not_equal( + tf.matmul(tf.reshape(mask, [-1, 1]), tf.reshape(mask, [1, -1])), zero + ) + ) + q = tf.cast( + tf.sqrt(tf.cast(tf.shape(input=mask_mat)[0], dtype=tf.float64)), dtype=tf.int32 + ) L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) maskindex = tf.compat.v1.where(tf.not_equal(mask, zero)) y_masked = tf.gather_nd(y, maskindex) - x_s1 = tf.linalg.triangular_solve(L_masked, y_masked, - lower=lower, adjoint=adjoint) + x_s1 = tf.linalg.triangular_solve(L_masked, y_masked, lower=lower, adjoint=adjoint) x = tf.scatter_nd(maskindex, x_s1, tf.cast(tf.shape(input=y), dtype=tf.int64)) return x @@ -214,19 +220,20 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): """ n = len(L) if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, - lower=True, adjoint=False) + return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=False) else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_list = tf.stack( + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L] + ) n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) - nb = tf.cast(n_prod/na, dtype=tf.int32) + nb = tf.cast(n_prod / na, dtype=tf.int32) col = tf.shape(input=x)[1] for i in range(na): - mask_b = tf.slice(mask, [i*nb], [nb]) - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + mask_b = tf.slice(mask, [i * nb], [nb]) + xt, xinb, xina = tf.split(x, [i * nb, nb, (na 
- i - 1) * nb], 0) t = xinb / L[0][i, i] if tf.reduce_sum(input_tensor=mask_b) != nb: @@ -237,14 +244,13 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): # all valid - same as no mask xinb = tf_solve_lower_triangular_kron(L[1:], t) t_masked = t - xina = (xina - - tf.reshape( - tf.tile(tf.slice(L[0], [i+1, i], [na-i-1, 1]), - [1, nb*col]), - [(na-i-1)*nb, col]) - * tf.reshape( - tf.tile(tf.reshape(t_masked, [-1, 1]), [na-i-1, 1]), - [(na-i-1)*nb, col])) + xina = xina - tf.reshape( + tf.tile(tf.slice(L[0], [i + 1, i], [na - i - 1, 1]), [1, nb * col]), + [(na - i - 1) * nb, col], + ) * tf.reshape( + tf.tile(tf.reshape(t_masked, [-1, 1]), [na - i - 1, 1]), + [(na - i - 1) * nb, col], + ) x = tf.concat(axis=0, values=[xt, xinb, xina]) @@ -278,20 +284,21 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): """ n = len(L) if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, - lower=True, adjoint=True) + return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=True) else: x = y na = L[0].get_shape().as_list()[0] - n_list = tf.stack([tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]) + n_list = tf.stack( + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L] + ) n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32) - nb = tf.cast(n_prod/na, dtype=tf.int32) + nb = tf.cast(n_prod / na, dtype=tf.int32) col = tf.shape(input=x)[1] L1_end_tr = [tf.transpose(a=x) for x in L[1:]] - for i in range(na-1, -1, -1): - mask_b = tf.slice(mask, [i*nb], [nb]) - xt, xinb, xina = tf.split(x, [i*nb, nb, (na-i-1)*nb], 0) + for i in range(na - 1, -1, -1): + mask_b = tf.slice(mask, [i * nb], [nb]) + xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0) t = xinb / L[0][i, i] if tf.reduce_sum(input_tensor=mask_b) != nb: @@ -301,14 +308,12 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): xinb = tf_solve_upper_triangular_kron(L[1:], t) t_masked = t - xt = (xt - - tf.reshape( - tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), - [1, nb*col]), - [i*nb, col]) - * tf.reshape( - tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), - [i*nb, col])) + xt = xt - tf.reshape( + tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), [1, nb * col]), + [i * nb, col], + ) * tf.reshape( + tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), [i * nb, col] + ) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index 304823f17..8e9572e12 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -28,8 +28,7 @@ def test_against_scipy_mvn_row(): rowcov_np = rowcov._cov - scipy_answer = np.sum(multivariate_normal.logpdf( - X.T, np.zeros([m]), rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp(X_tf, rowcov, colcov) assert_allclose(scipy_answer, tf_answer, rtol=rtol) @@ -43,8 +42,6 @@ def test_against_scipy_mvn_col(): colcov_np = colcov._cov - scipy_answer = np.sum(multivariate_normal.logpdf(X, - np.zeros([n]), - colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp(X_tf, rowcov, colcov) assert_allclose(scipy_answer, tf_answer, rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 05b48255b..5bc7927ee 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ 
b/tests/matnormal/test_matnormal_logp_conditional.py @@ -41,14 +41,11 @@ def test_against_scipy_mvn_row_conditional(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - Q_np = Q._cov - rowcov_np = rowcov._cov -\ - A.dot(np.linalg.inv(Q_np)).dot((A.T)) + rowcov_np = rowcov._cov - A.dot(np.linalg.inv(Q_np)).dot((A.T)) - scipy_answer = np.sum(multivariate_normal.logpdf( - X.T, np.zeros([m]), rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer, rtol=rtol) @@ -71,14 +68,11 @@ def test_against_scipy_mvn_col_conditional(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - Q_np = Q._cov - colcov_np = colcov._cov -\ - A.T.dot(np.linalg.inv(Q_np)).dot((A)) + colcov_np = colcov._cov - A.T.dot(np.linalg.inv(Q_np)).dot((A)) - scipy_answer = np.sum(multivariate_normal.logpdf( - X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q) diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index 53649c8e2..fb95edc43 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -35,14 +35,11 @@ def test_against_scipy_mvn_row_marginal(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - Q_np = Q._cov rowcov_np = rowcov._cov + A.dot(Q_np).dot(A.T) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, - np.zeros([m]), - rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer, rtol=rtol) @@ -60,13 +57,11 @@ def test_against_scipy_mvn_col_marginal(): A_tf = tf.constant(A, "float64") X_tf = tf.constant(X, "float64") - Q_np = Q._cov colcov_np = colcov._cov + A.T.dot(Q_np).dot(A) - scipy_answer = np.sum(multivariate_normal.logpdf( - X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer, rtol=rtol) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index ac1d0c7ce..7ea32ec49 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -30,7 +30,7 @@ def test_matnorm_regression_unconstrained(): Y_hat = X.dot(B) rowcov_true = np.eye(m) colcov_true = wishart.rvs(p + 2, np.eye(p)) - + y = Y_hat + rmn(rowcov_true, colcov_true) row_cov = CovIdentity(size=m) @@ -80,8 +80,7 @@ def test_matnorm_regression_optimizerChoice(): row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedInvCholesky(size=p) - model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, - optimizer="CG") + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, optimizer="CG") model.fit(X, Y, naive_init=False) diff --git a/tests/matnormal/test_matnormal_utils.py b/tests/matnormal/test_matnormal_utils.py index fd6424e48..c52ccaf3e 100644 --- a/tests/matnormal/test_matnormal_utils.py +++ b/tests/matnormal/test_matnormal_utils.py @@ -5,10 +5,8 @@ def test_pack_unpack(): shapes = [[2, 3], [3], [3, 4, 2], [1, 5]] - mats = [tf.random.stateless_normal( - shape=shape, seed=[0, 0]) for shape in 
shapes] + mats = [tf.random.stateless_normal(shape=shape, seed=[0, 0]) for shape in shapes] flatmats = pack_trainable_vars(mats) unflatmats = unpack_trainable_vars(flatmats, mats) for mat_in, mat_out in zip(mats, unflatmats): - assert tf.math.reduce_all( - tf.equal(mat_in, mat_out)) + assert tf.math.reduce_all(tf.equal(mat_in, mat_out)) From 0cf0a77fcee1732554fdfa457025c36ebd40b3e9 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 5 Aug 2020 21:31:09 -0700 Subject: [PATCH 52/84] remove nb_black dependency --- examples/matnormal/MN-RSA.ipynb | 94 +++------------------------------ 1 file changed, 6 insertions(+), 88 deletions(-) diff --git a/examples/matnormal/MN-RSA.ipynb b/examples/matnormal/MN-RSA.ipynb index 4e1f3e76e..fc2849ae9 100644 --- a/examples/matnormal/MN-RSA.ipynb +++ b/examples/matnormal/MN-RSA.ipynb @@ -113,7 +113,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 1, @@ -131,37 +131,9 @@ "needs_background": "light" }, "output_type": "display_data" - }, - { - "data": { - "application/javascript": [ - "\n", - " setTimeout(function() {\n", - " var nbb_cell_id = 1;\n", - " var nbb_unformatted_code = \"%load_ext nb_black\\nimport scipy\\nfrom scipy.stats import norm\\nfrom scipy.special import expit as inv_logit\\nimport numpy as np\\nfrom numpy.linalg import cholesky\\nimport matplotlib.pyplot as plt\\n\\n\\ndef rmn(rowcov, colcov):\\n # generate random draws from a zero-mean matrix-normal distribution\\n Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0])))\\n return cholesky(rowcov).dot(Z).dot(cholesky(colcov))\\n\\n\\ndef make_ar1_with_lowrank_covmat(size, rank):\\n \\\"\\\"\\\" Generate a random covariance that is AR1 with added low rank structure\\n \\\"\\\"\\\"\\n sigma = np.abs(norm.rvs())\\n rho = np.random.uniform(-1, 0)\\n offdiag_template = scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)])\\n diag_template = np.diag(np.r_[0, np.ones(size - 2), 0])\\n I = np.eye(size)\\n\\n prec_matrix = (I - rho * offdiag_template + rho ** 2 * diag_template) / (sigma ** 2)\\n lowrank_matrix = norm.rvs(size=(size, rank))\\n return np.linalg.inv(prec_matrix) + lowrank_matrix.dot(lowrank_matrix.T)\\n\\n\\ndef gen_data(n_T, n_V, space_cov, time_cov):\\n\\n n_C = 16\\n U = np.zeros([n_C, n_C])\\n U = np.eye(n_C) * 0.6\\n U[8:12, 8:12] = 0.8\\n for cond in range(8, 12):\\n U[cond, cond] = 1\\n\\n beta = rmn(U, space_cov)\\n\\n X = rmn(np.eye(n_T), np.eye(n_C))\\n\\n Y_hat = X.dot(beta)\\n\\n Y = Y_hat + rmn(time_cov, space_cov)\\n\\n return beta, X, Y, U\\n\\n\\nn_T = 100\\nn_V = 80\\nn_C = 16\\n\\nspacecov_true = np.diag(np.abs(norm.rvs(size=(n_V))))\\ntimecov_true = make_ar1_with_lowrank_covmat(n_T, rank=7)\\n\\ntrue_beta, true_X, true_Y, true_U = gen_data(n_T, n_V, spacecov_true, timecov_true)\\n\\n%matplotlib inline\\nplt.matshow(true_U)\";\n", - " var nbb_formatted_code = \"%load_ext nb_black\\nimport scipy\\nfrom scipy.stats import norm\\nfrom scipy.special import expit as inv_logit\\nimport numpy as np\\nfrom numpy.linalg import cholesky\\nimport matplotlib.pyplot as plt\\n\\n\\ndef rmn(rowcov, colcov):\\n # generate random draws from a zero-mean matrix-normal distribution\\n Z = norm.rvs(norm.rvs(size=(rowcov.shape[0], colcov.shape[0])))\\n return cholesky(rowcov).dot(Z).dot(cholesky(colcov))\\n\\n\\ndef make_ar1_with_lowrank_covmat(size, rank):\\n \\\"\\\"\\\" Generate a random covariance that is AR1 with added low rank structure\\n \\\"\\\"\\\"\\n sigma = np.abs(norm.rvs())\\n rho = np.random.uniform(-1, 0)\\n offdiag_template = 
scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)])\\n diag_template = np.diag(np.r_[0, np.ones(size - 2), 0])\\n I = np.eye(size)\\n\\n prec_matrix = (I - rho * offdiag_template + rho ** 2 * diag_template) / (sigma ** 2)\\n lowrank_matrix = norm.rvs(size=(size, rank))\\n return np.linalg.inv(prec_matrix) + lowrank_matrix.dot(lowrank_matrix.T)\\n\\n\\ndef gen_data(n_T, n_V, space_cov, time_cov):\\n\\n n_C = 16\\n U = np.zeros([n_C, n_C])\\n U = np.eye(n_C) * 0.6\\n U[8:12, 8:12] = 0.8\\n for cond in range(8, 12):\\n U[cond, cond] = 1\\n\\n beta = rmn(U, space_cov)\\n\\n X = rmn(np.eye(n_T), np.eye(n_C))\\n\\n Y_hat = X.dot(beta)\\n\\n Y = Y_hat + rmn(time_cov, space_cov)\\n\\n return beta, X, Y, U\\n\\n\\nn_T = 100\\nn_V = 80\\nn_C = 16\\n\\nspacecov_true = np.diag(np.abs(norm.rvs(size=(n_V))))\\ntimecov_true = make_ar1_with_lowrank_covmat(n_T, rank=7)\\n\\ntrue_beta, true_X, true_Y, true_U = gen_data(n_T, n_V, spacecov_true, timecov_true)\\n\\n%matplotlib inline\\nplt.matshow(true_U)\";\n", - " var nbb_cells = Jupyter.notebook.get_cells();\n", - " for (var i = 0; i < nbb_cells.length; ++i) {\n", - " if (nbb_cells[i].input_prompt_number == nbb_cell_id) {\n", - " if (nbb_cells[i].get_text() == nbb_unformatted_code) {\n", - " nbb_cells[i].set_text(nbb_formatted_code);\n", - " }\n", - " break;\n", - " }\n", - " }\n", - " }, 500);\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" } ], "source": [ - "%load_ext nb_black\n", "import scipy\n", "from scipy.stats import norm\n", "from scipy.special import expit as inv_logit\n", @@ -240,7 +212,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 2, @@ -249,7 +221,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQoAAADxCAYAAAAz6fmnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAapklEQVR4nO3dfZAd1Xnn8e9vXoTQG0gIC4RkhLNYG0wIYK2DTdkhBjvEcUGqNslC/AIOu6rEWcc2OF4ILns3qXIpIbHNbhK8CjhgmyV2CE6omBhYbIpdF7C8GAOSwBCI5QEJAQYEEiNpZp79o3vwWJq595zp7jv3zvw+VV2al6Onz70jPXP69OnnKCIwM2ulb6Y7YGbdz4nCzNpyojCztpwozKwtJwoza8uJwszacqIw6yGSviRph6SHp/i+JP13SY9LelDSyXWc14nCrLdcDZzZ4vu/AhxbHuuBK+o4qROFWQ+JiDuAH7docjbw5SjcBRwq6ciq53WiMJtdjgJ+NOHzofJrlQxUDWBmrf3yLy2M5388mtT2vgf3bAKGJ3xpY0RsbKRjGZwozBr23I9HufvmVUltB4/8l+GIWFfhdE8Bqyd8vqr8WiW+9DBrXDAaY0lHDW4EPlje/TgFeCkitlUN6hGFWcMCGKOep7QlXQecBiyXNAR8BhgEiIgvAjcB7wEeB3YDH6rjvE4UZg0Lgn2RNkfRNlbEuW2+H8Dv1XKyCZwozDqgrhHFTJnROQpJZ0p6tFxFdnFNMVdL+o6kzZI2SfpoHXEnxO+X9D1J/1Rz3EMlXS/pEUlbJL21prgfL9+HhyVdJ2l+hVgHrAqUtEzSrZIeK/9cWlPcy8r34kFJ35B0aF19nvC9iySFpOXTiZ0qgFEi6ehWM5YoJPUDf0mxkuw44FxJx9UQegS4KCKOA04Bfq+muOM+CmypMd64y4FvRcS/BX6+jnNIOgr4fWBdRBwP9APnVAh5NQeuCrwYuC0ijgVuKz+vI+6twPERcQLwA+CSacSdKjaSVgPvBrZOM26WMSLp6FYzOaJ4C/B4RDwREXuBv6VYVVZJRGyLiPvLj1+m+A9XecEJgKRVwK8CV9YRb0LcQ4B3AFcBRMTeiHixpvADwMGSBoAFwNPTDTTFqsCzgWvKj68Bfq2OuBFxS0SMlJ/eRXGbL1uLlYyfBz4Jzf/vDGA0IunoVjOZKBpZQTaRpDXAScDdNYX8AsU/rlruY01wDPAs8DflZc2VkhZWDRoRTwF/RvFbcxvFrbJbqsbdz4oJt9+2Aytqjg/w28A/1xVM0tnAUxHx/bpitjOWeHSrWbuOQtIi4O+Bj0XEzhrivRfYERH3Ve7cgQaAk4ErIuIkYBfTG8L/lHK+4GyKRLQSWCjp/VXjTqWcca/116KkSykuJ6+tKd4C4A+BT9cRL0Ukzk94jmJyjawgA5A0SJEkro2IG+qICZwKnCXpXykuk94p6as1xR4ChiJifORzPUXiqOoM4MmIeDYi9gE3AG+rIe5Ez4w/dFT+uaOuwJLOB94LvC/qKxf/MxSJ8/vlz3IVcL+kI2qKf4AI2Jd4dKuZTBT3AMdKOkbSPIpJthurBpUkimv9LRHxuarxxkXEJRGxKiLWUPT12xFRy2/niNgO/EjS2vJLpwObawi9FThF0oLyfTmd+idibwTOKz8+D/jHOoJKOpPiMu+siNhdR0yAiHgoIl4XEWvKn+UQcHL5M2iIGE08utWMJYpyouo/AzdT/OP9ekRsqiH0qcAHKH7jP1Ae76khbtM+Alwr6UHgROCzVQOWI5TrgfuBhyh+3tN+wKhcF
XgnsFbSkKQLgA3AuyQ9RjGC2VBT3L8AFgO3lj/DL9bY544KYCzSjm4lbwBk1qzjT5gXX//m4Ult3/T6p++r+FBYI7wy06xhxYKr7r2sSOFEYdYBY+FEYWYteERhZm0FYl/0z3Q3KumKBVeS1jtuc3GbjN1rcZuOPZnxEYVvj1bX1A/OcZuP3Wtxm449CTEafUlHt/Klh1nDigpX3ZsEUnQ0USxf1h9rVg8e8PXXHzXAup+f/1MLOh4bPiQ57vCeA2MC9B92KAetWXXAQpG+4Ywh3iRNB5Ys5eAjVx8Qdyzj3dQkBY8GDlnK/JUHxu3fmx53qkvhwcVLWbCi/j4PLlnKwUccGFcZTzhN1oeBJZO/FzlxASb7JT3V+6yMJUXD24eei4i0xRF4MjPLmtWD/L+bV7dvCLzn0fTFlFueWJnVj8Wb5yW3HZs8B01qz7L0f2mDO9P/4SwaSo+7b1HeP8jdK9Jjz3spPfbgrvS4w8vS4w4Mt28z0b6MZ3D79qW3feSzF/4wtW2EarusKJe2X05RW+TKiNiw3/dfT/G4/6Flm4sj4qaq563U+yYqVJnNRmMo6WglsdjTpygehziJ4pmkv6qj/9MeUUzo9LsoHqy5R9KNEVHHw0xms0Yg9kYtg/fXij0BSBov9jTx/1wAS8qPD6FCoaKJqvQ+pdNmc17mZOZySfdO+HziTmGTFXv6hf3+/n8FbpH0EWAhxYN6lVVJFCmdNjNgNH0J93MVHwo7F7g6Iv68LND8FUnHR1TbXajxycxycct6KO5umM01gRit5/ZoSrGnCyiLCUfEnWXV9eVULChUpfdJFaoiYmNErIuIdYcf1tvLWM2mayz6ko42Uoo9baUoUISknwXmU9RjraTKr/jXOk2RIM4Bfqtqh8xmm2IJd/URRUSMSBov9tQPfCkiNkn6I+DeiLgRuAj4a0kfL099fh1lBKedKKbqdNUOmc02dT4UVq6JuGm/r316wsebKaq81arSpMFknW7lseFDkhdS3bQ2fY3IL//SicltAX7439Lryy7amp6Moy994dDw4elzS0ueTG7KrszdL/YtT19lNLog/Z/L3ozVr/NeSm7KvBfzfjnm/EyaWjwZQVc/x5HCs4tmjWu/mKrbOVGYNazYKcwjCjNro6bbozPGicKsYYFcM9PM2vOIwsxamg01M50ozBpW7BTmEYWZteEKV2bWUoQ8ojCz9ryOIsPwnsHk+pY5y7JvfvqBrH4c+5W3JrfdeWx63OhLX1687KH0oejLR6e3Xbopb4lz/970fwI5hXiLK/M0r6xKf32vHJ3TB4icirkNKQrX+NLDzFqqr7juTHGiMGtYQM/fHp12mpO0WtJ3JG2WtEnSR+vsmNlsMb4yM+XoVlVGFCPARRFxv6TFwH2SbnUVbrMDzdmdwiJiG7Ct/PhlSVsoCu46UZhNUNSj6N7RQopa5igkrQFOAu6uI57ZbNPNlxUpKo+HJC0C/h74WETsnOT76yXdK+ne0Vd2VT2dWc8p5ihqKa6btDufpN+cMHf4v+p4DZVGFJIGKZLEtRFxw2Rtys1LNgKTbhhsNhfUsYQ7ZXc+SccClwCnRsQLkl5X+cRU21JQwFXAloj4XB2dMZuNAjEyVsvt0ZTd+f4T8JcR8QJARFTaz2NclUuPU4EPAO+U9EB5pG9BbjaHZGxSvHz8Ur081k8IM9nufEftd6o3Am+U9F1Jd5W7n1dW5a7H/6WxusVms0fmXY+qWwoOAMcCp1FsynWHpJ+LiBcrxOzsysy+YbF487yktjkl9XOe3QB47ANXJLc9+Y9+N7ntiz+XXoJ/5xvSB3OHPZwe99kT83L3wHDGoDJjhkkZO10u2J4euG97elyAF9emtx1ZuTcveIaanh5N2Z1vCLg7IvYBT0r6AUXiuKfKiXt7FYhZD6hxZWbKloL/QDGaQNJyikuRJ6q+Bj/rYdYBdTw9mril4M3AuyVtBkaBP4iI56ue24nCrGFFKbx6pvMSthQM4MLyqI0ThVnTorbbozPGicKsYS5cY2ZJev1ZDycKs4bVOUcxU5wozDrAicLMWvLeo2bWXsCIi+tmEIwNpjVdtDV9WW9OSX3IW5Z9/6fTl3uf+rHfSW47vDS5KbtWpP8jG5ufsXYaiIxVyzm/FEfnp7fVSHrgkUV5lQqWbklvu3PPQVmxU3mOwsySOFGYWUuzYY6ijlJ4/ZK+J+mf6uiQ2WwUoaSjW9UxovgosAVYUkMss1mp11dmVhpRSFoF/CpwZT3dMZt9IpjTGwABfAH4JLB4qgZlKa/1AANLMqb6zWYNMTrW27dHq2wp+F5gR0Tc16pdRGyMiHURsW5gwcLpns6sp83lOYpTgbPKgrrzgSWSvhoR76+na2azw2xYRzHtEUVEXBIRqyJiDUVJrm87SZhNIop5ipSjW3kdhVkH9Ppdj1oSRUTcDtzert3YAOxZlpY2oy/9jY2+vFScUy07Z1n2d7/wxeS2b/i79LgHb09/L1bekfde7FmcHnvfwoyl1gvS+3DEPa8mt911RN4y61cPSx80L9jWzK/0gNrmH8p9Oi6nqJl5ZURsmKLdvweuB/5dRNxb9bweUZg1rp5bnylbCpbtFlOsb6pt0/Devmdj1iPGxpR0tPHaloIRsRcY31Jwf38M/AkwXFf/nSjMGlZMVCbfHq20paCkk4HVEfHNOl+DLz3MOiDj0mPaWwpK6gM+B5w/nb/fihOFWQfUdOuz3ZaCi4HjgdslARwB3CjprKoTmk4UZh1Q012P17YUpEgQ5wC/9ZNzxEvA8vHPJd0OfKKOux6eozBrWJA2P9EumUTECDC+peAW4OvjWwpKOqvJ1+ARhVkH1LVCo92Wgvt9/bSaTutEYda4gGh/67OrOVGYdUA3PxmaoqOJQqMwuDPtDRs+PH2Z9bKH8n4IO9+QPjWTUy07Z1n2E7+Rvtz7xA0fTm677dS892J0wWhy275X09+3vozK2s+8+eDktqmPAIzr35PeduFQVugs3fzAVwqPKMwaVuezHjPFicKsaUHepihdqGrNzEMlXS/pEUlbJL21ro6ZzSZzvR7F5cC3IuLXJc0DMh4uNptDujgJpJh2opB0CPAOynXl5dNsGRvUmc0V6vnbo1UuPY4BngX+ptwA6EpJB1TPlbR+/Em40d27KpzOrEflPT3alaokigHgZOCKiDgJ2AVcvH+jiVW4+12F2+aqSDy6VJVEMQQMRcR4FZ3rKRKHmR1AiUd3qlKFezvwI0lryy+dDmxu8VfM5q4eH1FUvevxEeDa8o7HE8CHqnfJbBbq4iSQolKiiIgHgORqPP17YdFQ2ju25Mn0frx8dN6Q7bCH05eH71qRPujKqZadsyz7gYv/Krntm/5HelyAhdsy3ruMf+x7ljYTd+TovDKQI8+nV+0endfQ0N8PhZlZkrk8ojCzRF186zOFE4VZB6jHRxQuhWfWtNQ7HgnJRNKZkh6V9LikA9YtSbpQ0mZJD0q6TdLRdbwEJwqzxqm49Eg5WkX5yU5hvwIcB5wr6bj9mn0PWBcRJ1CsbfrTOl6BE4VZJ9Qzomi7
U1hEfCcidpef3kVR0r8yJwqzThhLPFpru1PYfi4A/nla/d2PJzPNmpZXuGa5pIn7cGyMiI25p5T0foo1Tr+Y+3cn40Rh1gEZdz1abSnYbqew4lzSGcClwC9GREbV0Kn50sOsE+qZo3htp7DysYlzgBsnNpB0EvA/gbMiYkdd3e/oiCL6Yd+itCHYrowpmKWb8m5SP3ti+uKXsfnpy71X3pHej5xq2TnLsjd9JH25N8DfvpxeZvzQ/t3tG5WGYzC57aeu/GBy275t85PbAix/IP1nsmNddy92iIgRSeM7hfUDXxrfKQy4NyJuBC4DFgF/V+4/ujUiKu8i5ksPsw6oa8FVu53CIuKMes7005wozDqhx5dwV63C/XFJmyQ9LOk6SXnjQrO5IKjr9uiMmXaikHQU8PsUq8COp7hmOqeujpnNJoq0o1tVvfQYAA6WtI+iVP/T1btkNgt1cRJIUaUU3lPAnwFbgW3ASxFxS10dM5tVerwUXpVLj6UU68yPAVYCC8vVYPu3e61c/8irLtdvc0/qZUc3X3pUmcw8A3gyIp6NiH3ADcDb9m80sVz/wMEu129zVA1Pj86kKoliK3CKpAUqVnacDmypp1tms0yPX3pMezIzIu6WdD1wPzBC8Rx89sMrZnOBuvjWZ4qqVbg/A3ympr6YzU5dPv+QoqMrM8cGYPeKtHds3/J9yXH79+a9jIHh9CuuyNh2ec/i9GvM0QWjyW1zSurnPLsBcM7iFxqJvXPs4Kx+pBrcmXcdHxn/Q/v3NDhH4ERhZm05UZhZO71+6eF6FGbWlkcUZp3Q4yMKJwqzpsUcvz1qZok8ojCzVoQnM80sRee2FDxI0tfK798taU0d3XeiMGtaTU+PJm4peAHwQkT8G+DzwJ/U8RKcKMw6oUNbCpafX1N+fD1wevnQZiUdnaPQKMx7Ka3PowvSuzaW+yoyrhdznvzdtzC9cd+rGTk6o785JfUhb1l2znLv7w4/n9x2MKNMySur8y72+0bS2x70QnNLuGu66zHZloK/MFWbsrz/S8BhwHNVTuzJTLNOSM9vtWwpWDcnCrOm5dWaqLql4HibIUkDwCFA+vBuCm3Hv5K+JGmHpIcnfG2ZpFslPVb+mffIotkcU1MpvLZbCpafn1d+/OvAtyOi8s3ZlAvlq4Ez9/vaxcBtEXEscFv5uZlNpYbJzIgYAca3FNwCfH18S0FJ49sGXgUcJulx4EJq+r/Z9tIjIu6Y5F7s2cBp5cfXALcD/6WODpnNRh3cUnAY+I16zvYT052jWBER28qPtwMrpmooaT2wHmBwia9QbI6a6yszy+ufKd+GiVW4+12F2+aguVyu/xlJRwKUf+6or0tms1CPV+GebqKYOLN6HvCP9XTHbHaa9SMKSdcBdwJrJQ1JugDYALxL0mMUGwFtaLabZj2ux0cUKXc9zp3iW6fnnkxjMLgr7d3YO5yznDbvHc5ZTjs6P73tyIL0tn0j6a9vz9L0tsMxmN4J8qpl5yzLPnV++mB13s70n19fenH2bP3DzcXu5iSQwiszzZrW5ZcVKZwozDrBicLM2nHNTDNry5ceZtZal9/RSOFEYdYJThRm1spsqMLtRGHWCU4UZtaOqteOmVFOFGZN85aCecYGYHhZ2nLkeS+lx31lVV715AXb07O7MpZaH3HPq8ltn3lz+tLpnGHrp678YHrjTDnVsnOWZd+94Yrktmv/T97r27M1vbTBviarIPT2gMIjCrNO6PXJTG8AZNYJHXh6NKXotaQTJd0paZOkByX9h5TY063CfZmkR8oTfUPSoXkvyWwO6VyFq5Si17uBD0bEmyiKZn8h5f/vdKtw3wocHxEnAD8ALkmIYzZ3daYexcTtBK8Bfu2AbkT8ICIeKz9+mqI63eHtArdNFBFxB/Dj/b52S1k6HOAuio1IzGwS4wuuEkcUyyXdO+FYn3Gq5KLXAJLeAswD/qVd4DomM38b+FqLzrxWhXvAVbhtjtJY8nCh1U5hSPrfwBGTfOvSiZ9EREhTX8yUtW6/ApwXEW1v3lZKFJIuBUaAa6dqU+6buBFg/srMHWbNZoMaHwqLiDOm+p6kZyQdGRHbWhW9lrQE+CZwaUTclXLead/1kHQ+8F7gfXVsWWY2m2ks7aiobdHrcivCbwBfjojrUwNPK1FIOhP4JHBWROyeTgyzOaUzk5mTFr2WtE7SlWWb3wTeAZwv6YHyOLFd4LaXHmUV7tMoJlmGgM9Q3OU4CLhVEsBdEfE72S/LbI7oxIKriHieSYpeR8S9wH8sP/4q8NXc2NOtwn1V7omgGFoNJFY6nvdi+jv7ytF5/ejbnt52ZFF6P3YdcVBy2z3L0uOOHJ1eHrpvW0bZcGBwZ/oS9VcypphyqmXnLMt+9O1fTg8MrH38d9Mbp0845gmgx6/OvYTbrAP8UJiZteTCNWbWXoQvPcysPY8ozKw9Jwoza8cjCjNrLWju1muHOFGYdYBvj5pZe77rYWbteI7CzFrz3qN5oi+9JHr0pT+DEJnp+sW16W2Xbklv++ph6Q/j9u9JjzvyfPozJMsfyHsvct67vpH2baYjp6R+1rMbwKMfSt8K4JQ/aOa5xmJlZm9nCo8ozDqhxyczp1WFe8L3LpIUkpY30z2z2UERSUe3mm4VbiStBt4NbK25T2azS0SxjiLl6FLTqsJd+jxFlavufXVmXaJD+3o0ZlpzFJLOBp6KiO+XFa7MrJUuvqxIkV0zU9IC4A+BTye2Xz++R8Ho7oxdbs1mi+hMcd2ULQUntF0iaUjSX6TEnk5x3Z8BjgG+L+lfKTb/uV/SZHsNEBEbI2JdRKzrX9DkdtFmXWy8JkW7o5qULQXH/TFwR2rg7EQREQ9FxOsiYk1ErAGGgJMjIqMSpdkc0yVbCgJIejPFLmK3pAZOuT16HXAnsLYcqlyQGtzMChm3RxvdUlBSH/DnwCdy+j/dKtwTv78m54Rmc04Ao12zpeCHgZsiYijnRkRHV2YqMsq4N3gzZWTl3uS2O/ekL59esC197LhwKLkpo/PS34wd6/LGr/170mMf9EJ62/70HQaSl/UD2WsNcpZl33XZF5Pb9mfsjCHqW0xVw5aCbwXeLunDwCJgnqRXIqLVfIaXcJt1RGduj45vKbiBKbYUjIj3jX9cbgu6rl2SgAp7j5pZhs7c9UjZUnBaPKIwa1rQkYfCUrYU3O/rV1M8otGWE4VZB3TzA18pnCjMOsGJwsxaioCx3i5I4URh1gm9nSecKMw6wXMUZtaeE4WZteSdwvIMbx967pHPXvjDSb61HHiugVM6bvOxey3ulLFzlmUDR6c3rWUx1YzqbLn+iMMn+7qke1s9CDNdjtt87F6L23TsKTlRmFlLAYz29m0PJwqzxgWEE0UdNjpuo3GbjN1rcZuOPbkev/RQ9PgLMOt2h8xbEW87omX9p9d860eX39fx+ZME3TKiMJvdevwXshOFWSc4UZhZSxEwOjrTvajEicKsEzyiMLO2ejxRuGamWeM6s5t56paCkl4v6RZJWyRtlrSmXWwnCrOmBUSMJR0VpW4p+GXgsoj4WeAtTF7
W/6c4UZh1QgdGFCRsKSjpOGAgIm4FiIhXImJ3u8BOFGadkF6uv9EtBYE3Ai9KukHS9yRdJqm/XWBPZpo1Le/2aNNbCg4AbwdOArYCXwPOB65q1SknCrMOiJqK69awpeAQ8EBEPFH+nX8ATqFNovClh1njEi87qt9CHd9SEKbYUhC4BzhU0nhtmHcCm9sFdqIwa9p4KbzmJzPbbikYEaPAJ4DbJD1EsR34X7cL7EsPs07oQD2K1C0FyzseJ+TEdqIwa1gA4eK6ZtZSuMKVmSWIHn961BWuzBom6VsUWwSkeC4izmyyP9PhRGFmbfn2qJm15URhZm05UZhZW04UZtaWE4WZtfX/AbkRPdHlTKXmAAAAAElFTkSuQmCC\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQoAAADxCAYAAAAz6fmnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAbKElEQVR4nO3deZBd5Xnn8e9P3dqXloSEJJCwZCITYwYblYbBZuKFJRE2hVw1GRc4C8SM8UwFhzh4XBCmIMXUTJE4Y5uZMBiBiYmhwAyxx6pYbMYwVFKA2ReBARkT0WLRggxBe3c/88c5jS+93ff0Offq3tu/T9Up7r398J5X3dLT73nPe55XEYGZ2VgmHewOmFnrc6Iws7qcKMysLicKM6vLicLM6nKiMLO6nCjM2oik6yVtlfTMKF+XpP8paZOkpyStquK8ThRm7eW7wJoxvn4asDI/zgOuruKkThRmbSQi7gfeHCNkLfB3kXkQmCtpSdnzOlGYdZbDgVdq3vfmn5XSXbYBMxvb73xqZux4sz8p9tGn9m0E9tZ8tC4i1jWkYwU4UZg12PY3+3nozqVJsZOX/GJvRKwucbotwLKa90vzz0rxpYdZwwX9MZB0VGA98If53Y8TgLci4rWyjXpEYdZgAQxQzVPakm4GPgkskNQLXAZMBoiIbwMbgE8Dm4DdwB9VcV4nCrMGC4IDkTZHUbetiLPqfD2AP67kZDWcKMyaoKoRxcFyUOcoJK2R9Hy+iuyiitpcJuleSc9K2ijpgirarWm/S9Ljkv6h4nbnSrpN0s8lPSfpoxW1+5X8+/CMpJslTSvR1rBVgZLmS7pb0ov5f+dV1O7X8+/FU5J+KGluVX2u+dqFkkLSgvG0nSqAfiLpaFUHLVFI6gKuIltJdjRwlqSjK2i6D7gwIo4GTgD+uKJ2B10APFdhe4OuBO6IiN8EPlzFOSQdDvwJsDoijgG6gDNLNPldhq8KvAi4JyJWAvfk76to927gmIg4FngBuHgc7Y7WNpKWAb8NbB5nu4UMEElHqzqYI4rjgU0R8VJE7AduIVtVVkpEvBYRj+Wv/4XsH1zpBScAkpYCnwGuq6K9mnZ7gI8D3wGIiP0R8auKmu8GpkvqBmYAr463oVFWBa4Fbshf3wB8top2I+KuiOjL3z5IdpuvsDFWMn4T+Bo0/l9nAP0RSUerOpiJoiEryGpJWg4cBzxUUZPfIvvLVcl9rBorgG3A3+aXNddJmlm20YjYAvw12W/N18huld1Vtt0hFtXcfnsdWFRx+wBfAG6vqjFJa4EtEfFkVW3WM5B4tKqOXUchaRbw98CfRsTbFbR3OrA1Ih4t3bnhuoFVwNURcRywi/EN4d8jny9YS5aIDgNmSvr9su2OJp9xr/TXoqRLyC4nb6qovRnAnwOXVtFeikicn/AcxcgasoIMQNJksiRxU0T8oIo2gROBMyS9THaZdJKkGytquxfojYjBkc9tZImjrFOAX0bEtog4APwA+FgF7dZ6Y/Cho/y/W6tqWNI5wOnA70V15eKPJEucT+Y/y6XAY5IWV9T+MBFwIPFoVQczUTwMrJS0QtIUskm29WUblSSya/3nIuIbZdsbFBEXR8TSiFhO1tefRkQlv50j4nXgFUlH5R+dDDxbQdObgRMkzci/LydT/UTseuDs/PXZwI+qaFTSGrLLvDMiYncVbQJExNMRcWhELM9/lr3Aqvxn0CCiP/FoVQctUeQTVecDd5L95b01IjZW0PSJwB+Q/cZ/Ij8+XUG7jfZl4CZJTwEfAf572QbzEcptwGPA02Q/73E/YJSvCnwAOEpSr6RzgSuAUyW9SDaCuaKidv8GmA3cnf8Mv11hn5sqgIFIO1qVvAGQWWMdc+yUuPXHC5NiP3TEq4+WfCisIbwy06zBsgVXrXtZkcKJwqwJBsKJwszG4BGFmdUViAPRdbC7UUpLLLiSdJ7bbVy7jWy73dptdNsjGRxR+PZoeY36wbndxrfdbu02uu0RiP6YlHS0Kl96mDVYVuGqdZNAiqYmigXzu2L5ssnDPj/i8G5Wf3jaexZ0PP9yeomAge6Rh2xTZs5j5iHLhi0U6d6TXm1of8/wa8vJc+Yxfcnwdrv2DgsdVdeevmGfTeueQ8+0JcPaHZiafn07af/If7Zpk+fQM32EticXuHYe4ds8ddpcZvcsHb4Yp8DynBjh5zdlxjxmzR/+PS5qpJsNo/29KGL3m73bIyJtcQSezCxk+bLJ/OzOZfUDgU994YvJ7e45pNhE0byn05/g7l0zPzl27qb0BDT7me3JsbuPTO/DjM3Fnn/bt3hWcuxoCXkk6k//d7hv/vBfHlW0C9A3rTH/QB++8av/nBoboZa+rEhRqveNqFBl1okGUNLRqsY9oqipUHUq2YM1D0taHxFVPMxk1jECsT/aezqwzIiiIRWqzDrN4GRmytGqyqS5kSpU/Zty3THrTP1ewj22fHHLeZDd3TCbaALR38KjhRRlep9UoSoi1kXE6ohYvbDg3QmzTjEQk5KOVlXmV/y7FarIEsSZwOcr6ZVZB8mWcLduEkgx7t43sEKVWUcZfCgs5ain3pIESUfkG2A9nm+eVEl1t1KTBhGxgWxT1CTPv7wgeSHVvddfm9yPT3/41ORYgD2r3pccu/iBPcmxk7e/kxz7ytr0qvaLf5behx2r0hdnAexZmD7JNvP19ILyB6antzt7y/BVqqOZtL9YUfsdH5+aHDt1pN0/KhBBJQuuEpck/BeyX9pX5xtfbQCWlz13e4+HzNpC2mKrhAVXKUsSApiTv+6hxIZPtXwbwqzBsp3Ckn8nL5D0SM37dRExWBA5ZUnCXwB3SfoyMJOs4HFpThRmTVBgMnN7yeK6ZwHfjYj/kW90/T1Jx0REqY3InCjMGixQVTUzU5YknEu+KXNEPJDvXr+AkhszeY7CrAn6mZR01JGyadZmso2ekPRBYBrZvraleERh1mBV1cyMiD5Jg0sSuoDrI2KjpMuBRyJiPXAhcK2kr5BNj5xTxXaMThRmDZbtFFbN4H2kJQkRcWnN62fJdsurlBOFWRO4wpWZjSlCLf0cRwonCrMmaPdSeE1NFAPdSq5vWWRZ9oYn7y7Uj9NOOys5duDJ55Jj+084Njl22TXPJMe+ceaHkmNnb
C92u3zKrvTY3QvT/7L3/GJ/cuz0jcMeOh7V3g8enhwL8P4b0tse6JmZHPt0gT5khWt86WFmY2r/4rpOFGYNFjBxtxSUtCx/nPVZSRslXVBlx8w6xeDKzJSjVZUZUfQBF0bEY5JmA49KuttVuM2Ga+XCuSnGnSgi4jXgtfz1v0h6juzpNicKsxpZPYrWHS2kqGSOQtJy4DjgoSraM+s0rXxZkaJ0opA0C/h74E8jYth+drVVuKfMnFf2dGZtJ5ujmKCXHgCSJpMliZsi4gcjxeRFN9YBpTeGNWtXE3YJtyQB3wGei4hvVNcls84SiL6BCXp7lOwJtT8ATpL0RH5UUvHXrNNM2E2KI+IfoYX/ZGYtwnc9ip5sTz/znv5VUmyRkvpFnt0AuP32m5NjP/nFtO0FAKbsTH++oe+YFcmxXenNsvMDBYe4BWaNpm9LD371t9LL5PcsSf9ezNm8NzkW4I2TDkuOVZEZtMcLdWNiT2aaWX0V1sw8aJwozJqglecfUjhRmDVYVgrPicLMxhLtf3vUicKswVy4xsyS+NLDzMbkOQozS+JEYWZj8joKM6svoM8rM9Pt7+mid838pNjFD+xJbrdISX0otiz7vmuvTY79xJfOS44dmJz+G+bArPTYWb3FyvXPeyptST3AO0f2JMd2703v89bjk0NRpC8Nz/6H9NCFD+wo1naiKucoJK0BriTbe/S6iLhihJjPAX+Rn/rJiPh82fN6RGHWBFUkCkldwFXAqUAv8LCk9bV1aiWtBC4GToyInZIOLX1iyj1mbmYJKqzCfTywKSJeioj9wC3A2iExXwSuioidABGxtYo/Q+lEIalL0uOS/qGKDpl1ogglHcACSY/UHLXXs4cDr9S8780/q/UB4AOS/knSg/mlSmlVXHpcADwHzKmgLbOOVGBl5vaIWF3iVN3ASuCTwFLgfkn/KiLSJ6NGUGpEIWkp8BngujLtmHWyCKq69NgCLKt5vzT/rFYvsD4iDkTEL4EXyBJHKWUvPb4FfA0Ydapd0nmDw6j+3QV2xDXrGKJ/YFLSUcfDwEpJKyRNAc4E1g+J+b9kowkkLSC7FHmp7J+gzJaCpwNbI+LRseIiYl1ErI6I1V0z0neLNuskBeYoxmgj+oDzgTvJLvdvjYiNki6XdEYediewQ9KzwL3Af46I0vd9y8xRnAickRfUnQbMkXRjRPx+2U6ZdZIq11FExAZgw5DPLq15HcCf5Udlxj2iiIiLI2JpRCwnGwL91EnCbASRzVOkHK3KC67MmsD1KICIuA+4r15c116Yu6k/qc3J299JPn//Cccmx0KxatlFlmX/v2vWJcd+ZtXvJMf2nZRepbroCHfS1p3JsdNnpS+fjknpHdm1ZEZybPeetL8/g/YsTB807z90VnrDG9NDA+rOP7Q6jyjMGs5Pj5pZgoEBJwozG0M2UelEYWZ1+NLDzOpq5VufKZwozJrAlx5mNqag/vLsVudEYdYEbX7l4URh1nAB4dujZlaPLz0K6NrTx+xntifFvrJ2UXK7y655plA/+o5JXxJdpFp2kWXZP37szuTYNWekP2u3d9H05FiAXauOSI6dtm1vcuzuw9L7sXdh+sB86R3FCjXN3pS+hHvfosaVQfBdDzMbk5/1MLP6guJP67WYsjUz50q6TdLPJT0n6aNVdcysk0z0ehRXAndExO/mNfzSnxc2m0haOAmkGHeikNQDfBw4ByDfkCS90IPZhKG2vz1a5tJjBbAN+Nt8A6DrJA2bNq6twr2/f3eJ05m1qaimuO7BVCZRdAOrgKsj4jhgF3DR0KDaKtxTunxlYhNUJB4tqkyi6AV6I+Kh/P1tZInDzIZR4tGaylThfh14RdJR+UcnA8+O8b+YTVxtPqIoe9fjy8BN+R2Pl4A/Kt8lsw7UwkkgRalEERFPAMkbqg5M7WL3kfOTYhf/bE9yP94480PJsQBdBe7NHJiVPhwsUi27yLLsO9bfmBx72soTk2MBDvzro+oH5fbNT6/CPW3bvuTYnufT5642n35IcizAYfenb2PZ/c6BQm0n80NhZpakzUcUZTcpNrMUobSjDklrJD0vaZOkYXcZa+L+naSQlDziH4sThVkTKNKOMduQuoCrgNOAo4GzJB09Qtxs4ALgoaFfGy8nCrNGS73jUf/y5HhgU0S8lK+EvgVYO0LcfwX+EkivC1CHE4VZwyVedmSXHgsGVzLnR+2elocDr9S8780/+/WZpFXAsoj4cZV/Ak9mmjVD+mTm9ogY17yCpEnAN8ifv6qSE4VZMwxU0soWYFnN+6X5Z4NmA8cA90kCWAysl3RGRDxS5sROFGaNVl3hmoeBlZJWkCWIM4HPv3uaiLeABYPvJd0HfLVskgDPUZg1RRV3PSKiDzgfuBN4Drg1IjZKulzSGY3sv0cUZs1Q0YKriNgAbBjy2aWjxH6ymrM2OVFM2t/PjM1vJ8XuWJW21BtgxvZiF4A7P9CVHDurN73tIqPLItWyiyzLvv3Ff0rvBHDPngeTYx/bszw59qipryXHXnblOcmxS3/yVnIswNsrZyfHTt3ZV6jticQjCrMmqHdZ0eqcKMyaoYWrV6UoW4X7K5I2SnpG0s2SplXVMbOOEWS3R1OOFjXuRCHpcOBPgNURcQzQRXa7xsyGqOKux8FU9tKjG5gu6QBZqf5Xy3fJrAO1cBJIUaYU3hbgr4HNwGvAWxFxV1UdM+sobV4Kr8ylxzyyJ9dWAIcBMyUNK9vkcv020aVedrTypUeZycxTgF9GxLaIOAD8APjY0CCX6zejssI1B0uZRLEZOEHSDGVPoJxMtqzUzIZq80uPcU9mRsRDkm4DHgP6gMeBdVV1zKyTqIVvfaYoW4X7MuCyivpi1plafP4hRVNXZg5M7mLf4llJsXsWpl+vTUmvyJ4p8EOb99SvkmMnbd2ZHLtr1RHJsUVK6hd5dgPg5On9ybEv7kuvrNZV4FfozNfT+9A/Y0pyLMC0N9Of39g3t4H/HJwozKwuJwozq6fdLz1cuMbM6vKIwqwZ2nxE4URh1mgxwW+PmlkijyjMbCyi/ScznSjMmsGJwszG5JWZZpbEiaIAwUB32tLsma+nTxPvXlhsOcj0bek/tXeO7Elvd9bU5Nhp29KXQ++bn95ukZL6UGxZ9n+cu6V+UO5LvR9Njn37iPTtE2a/sCc5FmDf/DnJsT0b05fgF+W7HmZWn0cUZjamFq81kaLumF3S9ZK2Snqm5rP5ku6W9GL+33mN7aZZe6uqFJ6kNZKel7RJ0kUjfP3PJD0r6SlJ90h6XxX9T7m4/y6wZshnFwH3RMRK4J78vZmNpoIKV5K6gKuA04CjgbMkHT0k7HGyLTSOBW4D/qqK7tdNFBFxP/DmkI/XAjfkr28APltFZ8w6VUUjiuOBTRHxUkTsB24h+7f4roi4NyIGq1g/CCytov/jfXp0UUQM7kL7OrBotMDaKtwH9hetMGPWIdJHFAsG/73kx3k1rRwOvFLzvjf/bDTnArdX0f3Sk5kREdLouTAi1pHX0pzds7TNp3TMiitYin97RKwufc5s64zVwCfKtgXj
H1G8IWlJ3qElwNYqOmPWsaqpwr0FWFbzfmn+2XtIOgW4BDgjIvaV7Dkw/kSxHjg7f3028KMqOmPWqSqao3gYWClphaQpZHv9rn/PeaTjgGvIkkRlv8BTbo/eDDwAHCWpV9K5wBXAqZJeJNsI6IqqOmTWkSoYUUREH3A+cCfZHjq3RsRGSZdLOiMP+zowC/g/kp6QtH6U5gqpO0cREWeN8qWTC58tQP1pF2sHpqdX4e75xf5C3Xj1t9KXRHfvTe9HTEqP3X3Y9OTYadvSR49HTX2tflCNItWyiyzLvmbpA8mxHx44Ljl236Ezk2MBpu5I/7ux6/3py/V5pn7Ie1Q0OxcRG4ANQz67tOb1KdWc6b28MtOs0fz0qJklcaIws3r89KiZ1eVLDzMbWwc8PepEYdYMThRmNhZX4TazNE4UZlaPor0zhROFWaN5S8Fiolvsmz85KXb2lr7kdqdvTK8ODdCzZEVy7Nbj09vdtWRGcuzehem/YXqeT2/3sivPSY4FmPl6f3JskWrZRZZlP/m1/50c+6kvfDE5FuCtI6ckxx7y5FuF2i6kvQcUHlGYNYMnM82svjZPFOOtwv11ST/PK/3+UNLcxnbTrI0l1qJo5VHHeKtw3w0ck1f6fQG4uOJ+mXWWaipcHTTjqsIdEXflRTSgwkq/Zp1ocMFVO48oqpij+ALw/dG+mFcRPg9gygzvE2QTkwZaOAskGG/NTAAkXQL0ATeNFhMR6yJidUSsnjy1WHUis46QetnRwrlk3CMKSecApwMnR7T5sjOzBpuQC64krQG+BnyiZlciMxtNm/8qHW8V7r8BZgN355V+v93gfpq1tY6fzBylCvd3xnvC1Crck/anj9X2fnCsXdWGm7N5b3KsokDF7j3py6GX3vGr5NjNpx+S3u5Pii1D7p+RvsR59gt7kmOLVMsusiz73uuvTY4F+Ldf/lJy7Fu/OSe94ccLdCKANr8698pMsyaYkHMUZpbOhWvMrL4IX3qYWX3tPqIoteDKzBJVtOBK0hpJz0vaJOmiEb4+VdL3868/JGl5Fd13ojBrgipuj0rqAq4CTgOOBs6SdPSQsHOBnRHxG8A3gb+sov9OFGaNFsBApB1jOx7YFBEvRcR+4BZg7ZCYtcAN+evbgJMlpe+ePQonCrMm0EDaASyQ9EjNcV5NM4cDr9S8780/Y6SY/Anvt4D0hTij8GSmWTOk3/XYHhGrG9mV8fCIwqwJKlrCvQVYVvN+af7ZiDGSuoEeYEfZ/jtRmDVadY+ZPwyslLRC0hTgTGD9kJj1wNn5698FflrF093NLdcv6JuWNq+y4+Ppz1i8/4Zi5frfOOmw9OAC00B7Fqbn3dmb0mMPu39XcuzbK2cnxwJMezN9W4R989OfhZi6Y39ybJGS+kWe3QD4x/91TXLsb9z0n9Ibvjk9NFuZWX4hRUT0STofuBPoAq6PiI2SLgceiYj1ZM9hfU/SJrLKdGeWPjGeozBrjoqe9YiIDcCGIZ9dWvN6L/Dvqznbr42rCnfN1y6UFJIWVN0xs06iiKSjVY23CjeSlgG/DWyuuE9mnSUS11C0cF3NcVXhzn2TrMpV6/7pzFpExxeuGYmktcCWiHiygkVfZp2vhS8rUhROFJJmAH9OdtmREv/rcv0zXa7fJqBo/8I141lHcSSwAnhS0stkiz4ek7R4pODacv3dLtdvE9VgTYp6R4sqPKKIiKeBQwff58lidURsr7BfZp2ldXNAkvFW4TazAtr99uh4q3DXfn15Zb0x60QBJFafb1UtuzJz6kg3ZEcx0FNs7qPIbaiFD6Q/T7P/0FnJsfsWpfe5+50DybFTd6YvyQbYNzf9r0DPxp3Jsbve35Mce8iT6VsMFCqpT7Fl2Zt+7+rk2K6vpvdBtPZoIUXLJgqzjuJEYWZ1OVGY2ZiCyh4KO1icKMyawHMUZlafE4WZjSkCBtr72sOJwqwZ2jtPOFGYNYPnKMysPicKMxvT4E5hbUwVVPJOP5m0DfjnEb60AGjE06dut/Ftt1u7VbX9vohYmBLYM21xfOyIs+sHAne8+FePtuIGQM0t1z/KN1bSI4345rjdxrfdbu02uu1R+dLDzMYUQH973/ZwojBruIBwoqjCOrfb0HYb2Xa7tdvotkfW5pceTZ3MNJuIeqYsio8tHrP+07vueOXKcU9mSpoPfB9YDrwMfC4idg6J+QhwNTAH6Af+W0R8v17b3qTYrBmaU1z3IuCeiFgJ3JO/H2o38IcR8SGyjb2+JWluvYadKMyaoTmJYi1wQ/76BuCzw7sRL0TEi/nrV4GtQN3bvK0yR2HWuSKgv78ZZ1oUEa/lr18HFo0VLOl4YArwi3oNO1GYNUP6aGGBpEdq3q+LiHcnXyX9BBhpD51L3nu6CGn06rCSlgDfA86OqH9LxonCrBnSE8X2sSYzI+KU0b4m6Q1JSyLitTwRbB0lbg7wY+CSiHgwpVOeozBruKbtZr4eGFwrfjbwo6EBkqYAPwT+LiJuS23YicKs0QIiBpKOkq4ATpX0InBK/h5JqyVdl8d8Dvg4cI6kJ/LjI/Ua9qWHWTM04enRiNgBnDzC548A/yF/fSNwY9G2nSjMmqHNFzY6UZg1WvNujzaME4VZE4SL65rZ2CpZdXlQOVGYNVoHlMJzojBrBtejMLOxBBAeUZjZmMIVrswsQbT57VFXuDJrMEl3kG0RkGJ7RKxpZH/Gw4nCzOryQ2FmVpcThZnV5URhZnU5UZhZXU4UZlbX/wf37nUQme9vRgAAAABJRU5ErkJggg==\n", "text/plain": [ "
" ] @@ -258,33 +230,6 @@ "needs_background": "light" }, "output_type": "display_data" - }, - { - "data": { - "application/javascript": [ - "\n", - " setTimeout(function() {\n", - " var nbb_cell_id = 2;\n", - " var nbb_unformatted_code = \"import tensorflow as tf\\nfrom brainiak.matnormal.covs import CovDiagonal, CovAR1, CovUnconstrainedCholesky\\nfrom brainiak.utils.utils import cov2corr\\nfrom brainiak.matnormal.utils import (\\n make_val_and_grad,\\n pack_trainable_vars,\\n unpack_trainable_vars,\\n unflatten_cholesky_unique,\\n)\\nfrom brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row\\nfrom scipy.optimize import minimize\\n\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nrsa_cov = CovUnconstrainedCholesky(size=n_C)\\n\\nparams = (\\n rsa_cov.get_optimize_vars()\\n + time_cov.get_optimize_vars()\\n + space_cov.get_optimize_vars()\\n)\\n\\n# construct loss (marginal likelihood constructed automatically)\\n# note that params are ignored by this function but implicitly\\n# tracked by tf.GradientTape, and the remaining inputs are\\n# embedded via the closure mechanism\\ndef loss(params):\\n return -(\\n time_cov.logp\\n + space_cov.logp\\n + rsa_cov.logp\\n + matnorm_logp_marginal_row(\\n true_Y, row_cov=time_cov, col_cov=space_cov, marg=true_X, marg_cov=rsa_cov\\n )\\n )\\n\\n\\nval_and_grad = make_val_and_grad(lossfn=loss, train_vars=params)\\n\\nx0 = pack_trainable_vars(params)\\n\\nopt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method=\\\"L-BFGS-B\\\")\\n\\nfit_params = unpack_trainable_vars(opt_results.x, params)\\n\\nfor var, val in zip(params, fit_params):\\n var.assign(val)\\n\\nU = rsa_cov._cov.numpy()\\nC = cov2corr(U)\\nplt.matshow(C)\\nplt.colorbar()\";\n", - " var nbb_formatted_code = \"import tensorflow as tf\\nfrom brainiak.matnormal.covs import CovDiagonal, CovAR1, CovUnconstrainedCholesky\\nfrom brainiak.utils.utils import cov2corr\\nfrom brainiak.matnormal.utils import (\\n make_val_and_grad,\\n pack_trainable_vars,\\n unpack_trainable_vars,\\n unflatten_cholesky_unique,\\n)\\nfrom brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row\\nfrom scipy.optimize import minimize\\n\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nrsa_cov = CovUnconstrainedCholesky(size=n_C)\\n\\nparams = (\\n rsa_cov.get_optimize_vars()\\n + time_cov.get_optimize_vars()\\n + space_cov.get_optimize_vars()\\n)\\n\\n# construct loss (marginal likelihood constructed automatically)\\n# note that params are ignored by this function but implicitly\\n# tracked by tf.GradientTape, and the remaining inputs are\\n# embedded via the closure mechanism\\ndef loss(params):\\n return -(\\n time_cov.logp\\n + space_cov.logp\\n + rsa_cov.logp\\n + matnorm_logp_marginal_row(\\n true_Y, row_cov=time_cov, col_cov=space_cov, marg=true_X, marg_cov=rsa_cov\\n )\\n )\\n\\n\\nval_and_grad = make_val_and_grad(lossfn=loss, train_vars=params)\\n\\nx0 = pack_trainable_vars(params)\\n\\nopt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method=\\\"L-BFGS-B\\\")\\n\\nfit_params = unpack_trainable_vars(opt_results.x, params)\\n\\nfor var, val in zip(params, fit_params):\\n var.assign(val)\\n\\nU = rsa_cov._cov.numpy()\\nC = cov2corr(U)\\nplt.matshow(C)\\nplt.colorbar()\";\n", - " var nbb_cells = Jupyter.notebook.get_cells();\n", - " for (var i = 0; i < nbb_cells.length; ++i) {\n", - " if (nbb_cells[i].input_prompt_number == nbb_cell_id) {\n", - " if (nbb_cells[i].get_text() == nbb_unformatted_code) {\n", - " 
nbb_cells[i].set_text(nbb_formatted_code);\n", - " }\n", - " break;\n", - " }\n", - " }\n", - " }, 500);\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" } ], "source": [ @@ -358,7 +303,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 3, @@ -367,7 +312,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAECCAYAAAD+eGJTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAASXklEQVR4nO3dfWzc9X0H8PfbZ5/PThySkDhPtkjUBgjLWBJlFOjDptKGDCJCNdpRjQrWToh2W2mFxAhoQ9W0rRNVKdIGHQqUqER0UwgPYi1NRotQxcIGIYE8FpoHMCQkxCFOzknO5/vsj7t0jnd34T6/h3P4vl9SFPvuPv5872y//buH731oZhCRcLU0ewEi0lwKAZHAKQREAqcQEAmcQkAkcAoBkcA1PQRILiW5k+SbJO9IuFcvyV+S3EZyK8lbk+w3om+G5Kskn0mh10SSa0juILmd5GUJ9/t25bbcQvIxkrmYv/7DJA+Q3DLitMkk15N8o/L/pIT73VO5PV8j+QTJiUn2G3HebSSN5JS4+lXT1BAgmQHwLwD+CMBFAL5M8qIEWxYB3GZmFwG4FMBfJNzvlFsBbE+hDwDcB+BZM7sQwO8l2ZfkLADfBLDYzOYDyAC4PuY2jwBYOuq0OwA8Z2ZzATxX+TzJfusBzDeziwH8GsCKhPuBZC+AJQDeirFXVc0+ErgEwJtmtsvMCgB+AmB5Us3MbJ+Zbax8fBTlX5BZSfUDAJI9AK4GsDLJPpVe5wD4DICHAMDMCmb2QcJtWwF0kGwF0Ang3Ti/uJm9AKB/1MnLAayqfLwKwLVJ9jOzdWZWrHy6AUBPkv0q7gVwO4DEX83X7BCYBeDtEZ/3IeFfylNIzgawEMBLCbf6AcrfzFLCfQBgDoCDAH5UufuxkuS4pJqZ2TsAvofyX6t9AI6Y2bqk+o0wzcz2VT7eD2BaCj1P+SqAnyXZgORyAO+Y2eYk+5zS7BBoCpLjATwO4FtmNpBgn2UADpjZK0n1GKUVwCIAD5jZQgB5xHuofJrKffHlKIfPTADjSN6QVL9qrPy691Re+07yLpTvUq5OsEcngDsB/G1SPUZrdgi8A6B3xOc9ldMSQ7IN5QBYbWZrk+wF4JMAriG5B+W7Op8l+WiC/foA9JnZqaObNSiHQlI+B2C3mR00syEAawFcnmC/U94jOQMAKv8fSLohyZsALAPwp5bshpuPoRyqmys/Nz0ANpKcnlTDZofA/wCYS3IOySzKDyo9nVQzkkT5/vJ2M/t+Un1OMbMVZtZjZrNRvm6/MLPE/lKa2X4Ab5O8oHLSFQC2JdUP5bsBl5LsrNy2VyCdB0CfBnBj5eMbATyVZDOSS1G+S3eNmQ0m2cvMXjezbjObXfm56QOwqPK9TaxpU/8BuArlR1x/A+CuhHt9CuVDx9cAbKr8uyql6/mHAJ5Joc8CAC9XruOTACYl3O87AHYA2ALgxwDaY/76j6H8eMNQ5RfiawDORflZgTcA/CeAyQn3exPlx65O/cz8MMl+o87fA2BKkt9DVhqJSKCafXdARJpMISASOIWASOAUAiKBUwiIBG7MhADJm9VP/cZarxD6jZkQAJDqFVe/s7rfR/m6pd5vLIWAiDRBqi8WmjI5Y7N726qed/DQMKaem6l63rb9U139jLXPGx7MI9NZfYNdS7HqyWfUMlT7thwq5NGWrd5vqKvOQuvIDtTuVyjkka3RjycKrn71FErHkW3pqHpeKZd1fc2WQvVvRGH4OLKZ6r3KDX0/09bWWvX0oWIeba11NmP6vn0oZav/DR46mUdbe+1+peq/JnUVjvajeCJfdaXVr3VCZve24b9/3nvmC46y8B++4eo33O4qQ3u/74do/D5ferz7ad+3oXf9SVdd+45E92j9P8fn+7bf5/YedtXxuO92GZ4x2VVXavMdUOd76gRZHScnNJ46O564t+Z5ujsgErhIIZDm+wOKSDLcIdCE9wcUkQREORJI9f0BRSQZUUKgae8PKCLxSfyBQZI3k3yZ5MsHDw0n3U5EGhQlBD7U+wOa2YNmttjMFtd6HYCINE+UEEj1/QFFJBnuFwuZWZHkXwL4OcqTZx42s62xrUxEUhHpFYNm9lMAP41pLSLSBHrFoEjgUt07sG3/VNc+gFfvvN/Vb8l1N575QlUc6/W9pjvXd9RV17XHN1S3/0Lf5ojuo74ht6V2549Li2+HzYk5vtfyezcQZYZ8k+KGOn23y+BU39/grr7G96i0FGvfJjoSEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREAqcQEAlcqrsIjb6pQN7dgOvWrHLVLfq7r7vqOO8cV13BMVEGALo3nnDVDXy8y1U3cd1OV13mpG8iEDtyrjoM+97LcvCy8339nH9K24/4di0OnNf4r20pW/tnTEcCIoFTCIgETiEgErgoY8h6Sf6S5DaSW0neGufCRCQdUR4YLAK4zcw2kuwC8ArJ9Wa2Laa1iUgK3EcCZrbPzDZWPj4KYDs0hkzkrBPLYwIkZwNYCOClOL6eiKQncgiQHA/gcQDfMrOBKuf/dhbh8GA+ajsRiVmkECDZhnIArDaztdUuM3IWYaZzXJR2IpKAKM8OEMBDALab2ffjW5KIpCnKkcAnAXwFwGdJbqr8uyqmdYlISqIMJP0VAN+L3kVkzNArBkUCl+ouwpYi0N7f+Jw472xA727AjX/zgKvu81+6yVV36Hd91+/QPN8uu+77X3TV5Zdd4qrLflBw1bUUfLsBSx2+H+vcgUFXXeaw71mvgStnuOoGZzb+O1Rqq32ejgREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREApfuLsIhw/h9xYbrcn1HXf28swG9uwHX//sjrrqrP3Wtq847c+/IdZ9w1fXPy7jqJuzy/a0p5nxvV3Fiiq9uhm9zJd5fMMFVN+3FD1x1bfmJDdfsqzO2UkcCIoFTCIgETiEgErg45g5kSL5K8pk4FiQi6YrjSOBWlEeQichZKOrwkR4AVwNYGc9yRCRtUY8EfgDgdgClGNYiIk0QZQLRMgAHzOyVM1zut7MIhwqaRSgy1kSdQHQNyT0AfoLyJKJHR19o5CzCtqxmEYqMNe4QMLMVZtZjZrMBXA/gF2Z2Q2wr
E5FU6HUCIoGLZe+AmT0P4Pk4vpaIpEtHAiKBS3UX4VAX8e6nG2/ZtWeSq19hgm83mXc2oHc34H/86klX3cK//4arbtqGI666cWt3uOpaZ0531SHj/BuV8e12LO7e66rrPjTXVVfqqDMgsI7c4cZ3j7YUa88v1JGASOAUAiKBUwiIBE4hIBI4hYBI4BQCIoFTCIgETiEgEjiFgEjgFAIigVMIiAROISASOIWASOBS3UWYHTD0rj/ZcF3/he2uft0b6wxgq+PQvJyrzjsb0Lsb8NW77nfVLV3+FVfdwJ/8vqtucLrvb03mRO2db/WMe8/3vrf5JTNcdXS+zW5Hv69w/JN139azKhYHa56nIwGRwCkERAKnEBAJXNQJRBNJriG5g+R2kpfFtTARSUfUBwbvA/CsmV1HMgugM4Y1iUiK3CFA8hwAnwFwEwCYWQFAIZ5liUhaotwdmAPgIIAfVUaTrySpEUMiZ5koIdAKYBGAB8xsIYA8gDtGX2jkLMKCZhGKjDlRQqAPQJ+ZvVT5fA3KoXCakbMIs5pFKDLmRJlFuB/A2yQvqJx0BYBtsaxKRFIT9dmBvwKwuvLMwC4AfxZ9SSKSpkghYGabACyOaS0i0gR6xaBI4Gjm26nlcU622y6f8qWG64ZnTXH1G/h4l6uu6982uOry133CVTd+9zFXHVp8Gf7sUz921S34R99ux3O3Nr5zFACGc77rl5/uO8Cd+BvfrtNdX/Dtcp35gu93j8ON1216/j4cO9xXdTinjgREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREApfqLEKvUrtzV9i6na66/LJLXHX98zKuunFrd7jqvLMBvbsBN63wzT786/cWuOou7nzbVZfjkKvu/lu+6Krr2uX7W9r1+n5X3ZGF3Y0XseoGQgA6EhAJnkJAJHAKAZHARZ1F+G2SW0luIfkYyVxcCxORdLhDgOQsAN8EsNjM5gPIALg+roWJSDqi3h1oBdBBshXlYaTvRl+SiKQpyvCRdwB8D8BbAPYBOGJm6+JamIikI8rdgUkAlqM8mHQmgHEkb6hyuf+bRVg67l+piCQiyt2BzwHYbWYHzWwIwFoAl4++0GmzCFs6IrQTkSRECYG3AFxKspMkUZ5FuD2eZYlIWqI8JvASypOINwJ4vfK1HoxpXSKSkqizCO8GcHdMaxGRJtArBkUCl+ouwlIui+PzexovbKm9A6qezEnfDLzsBwVX3QTnbrLWmdNddYPTff2mbPbdLt7dgP80bZOr7u6Dv+OqO1QY76obbvfdnlM3+5714tG8q64tP9x4r1Lt+YU6EhAJnEJAJHAKAZHAKQREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJXKq7CFsKReT2Hm647sScya5+7PCNQWgpNL5LCwCKOd9uR2R8WZw5UXtnWD3DOV8/72xA727A70zd6qp7/NgEV93OwYtcdYfP9/2cTfONykTnnoGGa1pO1v6Z1pGASOAUAiKBUwiIBO6MIUDyYZIHSG4ZcdpkkutJvlH5f1KyyxSRpHyYI4FHACwdddodAJ4zs7kAnqt8LiJnoTOGgJm9AKB/1MnLAayqfLwKwLUxr0tEUuJ9TGCame2rfLwfwLSY1iMiKYv8wKCZGYCaT1ifNotwWLMIRcYabwi8R3IGAFT+P1DrgqfNIsxoFqHIWOMNgacB3Fj5+EYAT8WzHBFJ24d5ivAxAP8F4AKSfSS/BuC7AD5P8g2UpxN/N9llikhSzrh3wMy+XOOsK2Jei4g0gV4xKBK4VHcRomTgccccvDpz1Ooa9u0GLHX4bpYTU7y7CDOusnHvlVx1+em+65fjkKvOOxvQuxvwj8c3vssOAP614Ls9S75vH8pPrDn6ZRv//lmdeZ46EhAJnEJAJHAKAZHAKQREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJXKq7CK2tFcMzGp8rmBny7e4avOx8V13uwKCrbsaLrjIUd+911eWXzHDVnbv9hKvu/lu+6Kobbvf9rfHOBvTuBlz3+KozX6iKC1d+3VU3PGe6q67lWKHhGtbZsKgjAZHAKQREAqcQEAmcdxbhPSR3kHyN5BMkJya7TBFJincW4XoA883sYgC/BrAi5nWJSEpcswjNbJ2ZFSufbgDQk8DaRCQFcTwm8FUAP6t15sgxZEPFfAztRCROkUKA5F0AigBW17rMyDFkba3jorQTkQS4XyxE8iYAywBcYd73ThaRpnOFAMmlAG4H8Adm5nt5nYiMCd5ZhP8MoAvAepKbSP4w4XWKSEK8swgfSmAtItIEesWgSODSnUVIoNTWeO4MdTqX6Yy4zGHfU5nvL/DNzus+NNdVR99mOez6QrurrmuX7waduvm4q+7w+TlXnXc2oHc34I4/f8BVd+nOW1x1lmn8Wbbiu7VvFB0JiAROISASOIWASOAUAiKBUwiIBE4hIBI4hYBI4BQCIoFTCIgETiEgEjiFgEjgFAIigVMIiAQu1V2EpWwL8j0dDdcNTvVlVfsR3za7gSt9M/6mvfiBq67U0eaq6+j3Xb/OF1xl6Hp9v6uOR327MqftdJXB+2533tmA3t2AG+7xvRfP0mtuaLgmc3y45nk6EhAJnEJAJHCuMWQjzruNpJGckszyRCRp3jFkINkLYAmAt2Jek4ikyDWGrOJelN92XDMHRM5irscESC4H8I6ZbY55PSKSsoafIiTZCeBOlO8KfJjL3wzgZgDIdk5qtJ2IJMxzJPAxAHMAbCa5B+WJxBtJVn2S9bRZhO2aRSgy1jR8JGBmrwPoPvV5JQgWm9n7Ma5LRFLiHUMmIh8R3jFkI8+fHdtqRCR1esWgSOAUAiKBS3cXYQY4OYEN13X1FV39Bs7zXb3Bmb7XP7XlJ7rqcodr7/CqZ/yTr7jqBq9e5Ko7srD7zBeqoi3vu36dewZcdaWs7/vecqzgqvPMBgR8uwEB4NmnH2245pIrq73er0xHAiKBUwiIBE4hIBI4hYBI4BQCIoFTCIgETiEgEjiFgEjgFAIigVMIiAROISASOIWASOAUAiKBo3dum6sZeRDA3hpnTwGQ5luUqd/Z2++jfN2S6neemU2tdkaqIVAPyZfNbLH6qd9Y6hVCP90dEAmcQkAkcGMpBB5UP/Ubg70+8v3GzGMCItIcY+lIQESaQCEgEjiFgEjgFAIigVMIiATufwHjWWOKGPDxxgAAAABJRU5ErkJggg==\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAQEAAAECCAYAAAD+eGJTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAASW0lEQVR4nO3df2yc9X0H8Pfbd/6ZOLGDyU8bHApj/BA0LGJAUNc1HaT8Cms3iapMYUVimrZBKyTED2nd/pkqtWph2gRDhCYaWZAGKaVAQ9IU1HUMWBISEhIIIQmJk4BNSBzXjs/2+bM/7snmeHcO97nnec7h+35JUWyfP/587Tu//dzd870PzQwiEq6aai9ARKpLISASOIWASOAUAiKBUwiIBE4hIBK4qocAySUk3yO5m+T9CffqIPkKyR0k3yF5T5L9xvTNkHyL5Asp9Goh+QzJd0nuJHl1wv2+G/0st5NcTbIh5q//JMluktvHfGwGyfUk34/+b0243w+in+fbJH9KsiXJfmMuu5ekkWyLq18xVQ0BkhkA/wzgawAuBvBNkhcn2HIEwL1mdjGAqwD8VcL9TroHwM4U+gDAIwDWmtnvArg8yb4k5wG4G8BCM7sUQAbAbTG3WQFgybiP3Q9gg5ldAGBD9H6S/dYDuNTMLgOwC8ADCfcDyQ4A1wHYH2Ovoqp9JHAlgN1mtsfMhgA8DWBpUs3M7LCZbY7e7kPhF2ReUv0AgGQ7gBsBPJFkn6jXdABfArAcAMxsyMyOJdw2C6CRZBZAE4BDcX5xM/s1gE/HfXgpgJXR2ysB3JpkPzNbZ2Yj0buvA2hPsl/kxwDuA5D42XzVDoF5AA6Meb8LCf9SnkSyE8ACAG8k3OphFK7M0YT7AMB8AD0AfhLd/XiC5JSkmpnZQQA/ROGv1WEAvWa2Lql+Y8wys8PR2x8BmJVCz5O+DeAXSTYguRTAQTPbmmSfk6odAlVBciqAZwF8x8yOJ9jnJgDdZrYpqR7jZAFcAeBRM1sAoB/xHiqfIrovvhSF8JkLYArJ25PqV4wVzntP5dx3kg+hcJdyVYI9mgA8COBvk+oxXrVD4CCAjjHvt0cfSwzJWhQCYJWZrUmyF4BFAG4huQ+FuzpfIflUgv26AHSZ2cmjm2dQCIWkfBXAXjPrMbNhAGsAXJNgv5M+JjkHAKL/u5NuSPIOADcB+JYlu+HmCyiE6tbodtMOYDPJ2Uk1rHYI/DeAC0jOJ1mHwoNKzyfVjCRRuL+808x+lFSfk8zsATNrN7NOFL63X5lZYn8pzewjAAdIXhh9aDGAHUn1Q+FuwFUkm6Kf7WKk8wDo8wCWRW8vA/CzJJuRXILCXbpbzGwgyV5mts3MZppZZ3S76QJwRXTdJta0qv8A3IDCI64fAHgo4V7XonDo+DaALdG/G1L6Pr8M4IUU+nwRwMboe3wOQGvC/f4ewLsAtgP4VwD1MX/91Sg83jAc/ULcCeAsFJ4VeB/ALwHMSLjfbhQeuzp5m3ksyX7jLt8HoC3J65BRIxEJVLXvDohIlSkERAKnEBAJnEJAJHAKAZHATZoQIHmX+qnfZOsVQr9JEwIAUv3G1e+M7vd5/t5S7zeZQkBEqiDVk4XaZmSss6O26GU9R/I4+6xM0cve2+t7TQXLsuRlw7l+1NYX32CXOTFS9OOnM9SSLXnZyEA/sk0l+uVc7ZDpHyq9lvwJ1GUai1422lj8Ojidmly+dL+RAdRlm4peZhnn35qa4tff0FA/6uom2BzpvE2P1hVf5/BgP2obSvfjiPN3qMTNc3ioH7UTfH9W4ucykVz/pxjO9RctLH2rTUBnRy3efLnj9J84zuLb73T1G2zz3dinb/3EVffh12e66lp2l/7lmrDuTd/W/f5LfHtRmj446qrLtxYPh9PWNfhunsz7fin759W76uqP+v5oWKb8X2YAGJ5a/I/lRLate7jkZbo7IBK4ikIgzdcHFJFkuEOgCq8PKCIJqORIINXXBxSRZFQSAlV7fUARiU/iDwySvIvkRpIbe474HgUXkeRUEgKf6fUBzexxM1toZgtLnQcgItVTSQik+vqAIpIM98lCZjZC8q8BvIzC5Jknzeyd2FYmIqmo6IxBM3sJwEsxrUVEqkBnDIoELtW9A+/tbXPtA9jw1HJXvyXnLHTV2cXnu+rOfa7HVcfcsKvuwDfK34cBAGdv9e1Y6r3ct5ErM+SbwJYd8NXl631/26buP+Gqy3b7hlgdvn6Oq66ur/y9ETbBj0RHAiKBUwiIBE4hIBI4hYBI4BQCIoFTCIgETiEgEjiFgEjgFAIigVMIiAROISASOIWASOAUAiKBS3UXoWXpmgrk3Q24dv9GV90NXz7XVce+flfd6MxWV13H6j2uut5Fvu/PuxtwcLrvZeWG5/punlO6fa9lWbvfN3nq+JXtrrq5a3zXn7U0l12T7S89JUlHAiKBUwiIBE4hIBK4SsaQdZB8heQOku+QvCfOhYlIOip5YHAEwL1mtplkM4BNJNeb2Y6Y1iYiKXAfCZjZYTPbHL3dB2AnNIZM5IwTy2MCJDsBLADwRhxfT0TSU3EIkJwK4FkA3zGz//eyq2NnEQ7nfM+ji0hyKgoBkrUoBMAqM1tT7HPGziKsrZ9SSTsRSUAlzw4QwHIAO83sR/EtSUTSVMmRwCIAfwbgKyS3RP9uiGldIpKSSgaS/gYAY1yLiFSBzhgUCRzNyp9r5jW9cY5d3XlH2XVW7ztg4aBvxt9Lrz7rqlty87dcdVbr3GU3tfwdmQBw7Pw6X79pvgO/hk98t7HsCV/d4Fm+v22ZnK9f25bfuuqGm33XQ76x/O/vrf/4R/Qd6yp6BepIQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwqc4iHGrJ4sOvzyy77tznelz9vLMBvbsB1/58latu0T1/4aqbcijnqmvu8s3q46hvl13Tvl5XXf95La662hO+mYl9ztmHQ631vrppvt2jTYcGy67hSOnrTkcCIoFTCIgETiEgErg45g5kSL5F8oU4FiQi6YrjSOAeFEaQicgZqNLhI+0AbgTwRDzLEZG0VXok8DCA+wD4npMRkaqrZALRTQC6zWzTaT7vf2cRjgxoFqHIZFPpBKJbSO4D8DQKk4ieGv9JY2cRZps0i1BksnGHgJk9YGbtZtYJ4DYAvzKz22NbmYikQucJiAQulr0DZvYqgFfj+Foiki4dCYgELtVdhJkc0LK7/B1szPlmCo7ObHXVeWcDencD/ucj/+Kqu37uF111o3/8+6467xDq/LQGV139Ed8uycFZvl19mWHfLslj5/lmQuZafT/P0Wxj+TU7S/+915GASOAUAiKBUwiIBE4hIBI4hYBI4BQCIoFTCIgETiEgEjiFgEjgFAIigVMIiAROISASOIWASODS3UXYP4SWNw+VXXfgGx2ufh2r97jqcpfMc9V5ZwN6dwO+fGiLq+66P/X1yzf4dlfmG3w3s7rDx1119Rnf7rwjFzW56jqfPuiqs6PHXH
Ujl8wvu6ZmqPRrAetIQCRwCgGRwCkERAJX6QSiFpLPkHyX5E6SV8e1MBFJR6UPDD4CYK2Z/QnJOgC+R1ZEpGrcIUByOoAvAbgDAMxsCMBQPMsSkbRUcndgPoAeAD+JRpM/QVIjhkTOMJWEQBbAFQAeNbMFAPoB3D/+k8bOIhzKn6ignYgkoZIQ6ALQZWZvRO8/g0IonGLsLMK6TPkvlSwiyapkFuFHAA6QvDD60GIAO2JZlYikptJnB/4GwKromYE9AP688iWJSJoqCgEz2wJgYUxrEZEq0BmDIoGjmW/+mkdzS7stuPbususyudI7oCaSa/Ud6Ay0+bKxuav8OYsAMFrr2/XW2O3btbju31e46m78vSWuOu9MyHxTnasuM+A7XeXTy1pcdaO+UYSY+Ur5O2oBYLS5/AfYX9+1HL0Dh4re0HQkIBI4hYBI4BQCIoFTCIgETiEgEjiFgEjgFAIigVMIiAROISASOIWASOAUAiKBUwiIBE4hIBK4VGcR1uTyaPrgaNl1vZe3ufplJpi/NpHhab5dfRz17sj09fPOBvTuBnxx01pX3eO9c111R0d8r1s7NTPoqntsxc2uunkbel11RxbNcdU1Hyh/96hNMJ9RRwIigVMIiAROISASuEpnEX6X5Dskt5NcTbIhroWJSDrcIUByHoC7ASw0s0sBZADcFtfCRCQdld4dyAJoJJlFYRip70XTRKRqKhk+chDADwHsB3AYQK+ZrYtrYSKSjkruDrQCWIrCYNK5AKaQvL3I5/3fLMKRAf9KRSQRldwd+CqAvWbWY2bDANYAuGb8J50yizDbVEE7EUlCJSGwH8BVJJtIEoVZhDvjWZaIpKWSxwTeQGES8WYA26Kv9XhM6xKRlFQ6i/B7AL4X01pEpAp0xqBI4FLdRWiZGuRby39w0LsbcHC6b5ddwye+3YBN+3y7yfLTfCda5ht8V593NqB3N+Bd032nj3j7zcwed9XNfs337NVove96mLbXt9uxZnC4/KIJdrjqSEAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREAqcQEAmcQkAkcKnuIkQNXTvfsgPOmYJzfd9ew1Ffv/7zWlx19UfKny0HAHWHfbvlRmb4Zvx5ZwOmvfvwwY8vc9X1nt/oqpuxzXc95Np8/Rp7+squYV67CEWkBIWASOAUAiKBO20IkHySZDfJ7WM+NoPkepLvR//7XqpGRKrusxwJrACwZNzH7gewwcwuALAhel9EzkCnDQEz+zWAT8d9eCmAldHbKwHcGvO6RCQl3scEZpnZ4ejtjwDMimk9IpKyih8YNDMDUPJJyFNmEQ71V9pORGLmDYGPSc4BgOj/7lKfeMoswjrfySYikhxvCDwPYFn09jIAP4tnOSKSts/yFOFqAP8F4EKSXSTvBPB9AH9E8n0UphN/P9llikhSTntyvZl9s8RFi2Nei4hUgc4YFAlcursIzSbczVRKvt6XVVO68666/tm+GYa1J5wzE2fVu+rqM3TVZft8uxanZnyz87yzAb27Af9h1tuuupfrFrnqPLdpAKjtc8wUBJB37AK1rtK/QzoSEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREAqcQEAlcqrsIR+tq0D+v/B1zU/efcPWr3f+Jq27wa+e46vqcsw8zw75daEcuanLVTe3yzcB7bMXNrrrZrw246ryzAb27ATf93aOuuhuvucVVN9A+21Xn2bVoE+w41ZGASOAUAiKBUwiIBM47i/AHJN8l+TbJn5JsSXaZIpIU7yzC9QAuNbPLAOwC8EDM6xKRlLhmEZrZOjMbid59HUB7AmsTkRTE8ZjAtwH8otSFY8eQDQ9qDJnIZFNRCJB8CMAIgFWlPmfsGLLaBo0hE5ls3CcLkbwDwE0AFkdDSUXkDOQKAZJLANwH4A/MzHc6mIhMCt5ZhP8EoBnAepJbSD6W8DpFJCHeWYTLE1iLiFSBzhgUCVyquwg5Yqg/OnL6Txwn2+2bZXf8St/pC21bfuuqG2r1zRQ8dl6tq67z6YOuuu4/nOuqm7eh11U3Wu+7mc3Y5rvevbMBvbsBX3zteV+/a2911eXOmVF2DSf4kehIQCRwCgGRwCkERAKnEBAJnEJAJHAKAZHAKQREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwqe4iBCeeiVbK4evnuNrNXbPHVTd40TxX3dC0jKsu11r+zwQA7OgxV93MV1xlOLLIdz1M2zvoqsu1+WYR1vYNu+q8swG9uwFf/M1zrrrfWfmXZdcM7dIsQhEpQSEgEjjXGLIxl91L0ki2JbM8EUmadwwZSHYAuA7A/pjXJCIpco0hi/wYhZcd18wBkTOY6zEBkksBHDSzrTGvR0RSVvZThCSbADyIwl2Bz/L5dwG4CwDqGzXBXGSy8RwJfAHAfABbSe5DYSLxZpJFn2Q9ZRZhnWYRikw2ZR8JmNk2ADNPvh8FwUIz+yTGdYlISrxjyETkc8I7hmzs5Z2xrUZEUqczBkUCpxAQCVyquwithhieWv5Ou7o+3/lI1tLsqss3+rKx6ZBvt9xo1rdbbuSS+a66TJ9vnc0Hcq66mkHfrr7Gnj5XXX6G71ko7wxDz2xAwLcbEAB2LXu07Jor/62n5GU6EhAJnEJAJHAKAZHAKQREAqcQEAmcQkAkcAoBkcApBEQCpxAQCZxCQCRwCgGRwCkERAKnEBAJHM3Se8Vwkj0APixxcRuANF+iTP3O3H6f5+8tqX7nmtnZxS5INQQmQnKjmS1UP/WbTL1C6Ke7AyKBUwiIBG4yhcDj6qd+k7DX577fpHlMQESqYzIdCYhIFSgERAKnEBAJnEJAJHAKAZHA/Q+xnHtpDs71SgAAAABJRU5ErkJggg==\n", "text/plain": [ "
" ] @@ -379,7 +324,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAECCAYAAAD+eGJTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAR9UlEQVR4nO3de4yc5XXH8e/x3tdefMHr+xJbBEG4JIBcCglJqpiASwiOVP4AlcSQSEg0LSRFQhDaRlX7R6SkIVRNnSJuVnBIJSAFIS52SQJEakjAxmCwAyYx+LL4Bovttde7sz79Y8bt4uysPWfe9501z+8jWd7dmbPn2Z3Z377z7jxzzN0RkXRNaPQCRKSxFAIiiVMIiCROISCSOIWASOIUAiKJa3gImNliM/udmW00s1ty7tVjZr8ws9fM7FUzuzHPfiP6NpnZGjN7rIBeU8zsQTPbYGbrzeyCnPt9s/K9XGdmD5hZe8af/x4z22Fm60Z8bJqZrTKzNyr/T82533cr38+XzexnZjYlz34jLrvJzNzMpmfVbzQNDQEzawJ+CPw5cDpwlZmdnmPLEnCTu58OnA98Ped+h90IrC+gD8AdwJPufhrwiTz7mtlc4AZgobufCTQBV2bc5j5g8REfuwV42t1PAZ6uvJ9nv1XAme7+ceB14Nac+2FmPcDFwNsZ9hpVo48EzgM2uvvv3X0Q+CmwJK9m7t7r7qsrb++l/AMyN69+AGY2D/gCcFeefSq9JgOfAe4GcPdBd+/LuW0z0GFmzUAnsC3LT+7uzwLvHvHhJcDyytvLgS/l2c/dV7p7qfLur4F5efaruB24Gcj92XyNDoG5wOYR728h5x/Kw8xsPnAO8HzOrX5A+cY8lHMfgAXATuDeysOPu8xsYl7N3H0r8D3Kv616gffdfWVe/UaY6e69lbffAWYW0POwrwJP5NnAzJYAW919bZ59Dmt0CDSEmU0CHgK+4e57cuxzGbDD3V/Mq8cRmoFzgWXufg7QT7aHyh9QeSy+hHL4zAEmmtnVefUbjZef917Ic9/N7DbKDylX5NijE/gW8A959ThSo0NgK9Az4v15lY/lxsxaKAfACnd/OM9ewKeAy81sE+WHOp8zs/tz7LcF2OLuh49uHqQcCnm5CPiDu+909yHgYeCTOfY7bLuZzQao/L8j74Zmdg1wGfCXnu+Gm5Mph+rayv1mHrDazGbl1bDRIfBb4BQzW2BmrZRPKj2aVzMzM8qPl9e7+/fz6nOYu9/q7vPcfT7lr+3n7p7bb0p3fwfYbGanVj60CHgtr36UHwacb2adle/tIoo5AfoosLTy9lLgkTybmdliyg/pLnf3/Xn2cvdX3H2Gu8+v3G+2AOdWbtvcmjb0H3Ap5TOubwK35dzrQsqHji8DL1X+XVrQ1/lnwGMF9DkbeKHyNf4XMDXnfv8IbADWAT8G2jL+/A9QPt8wVPmB+BpwIuW/CrwB/DcwLed+Gymfuzp8n/lRnv2OuHwTMD3P29AqjUQkUY1+OCAiDaYQEEmcQkAkcQoBkcQpBEQSN25CwMyuUz/1G2+9Uug3bkIAKPQLV7/jut+H+WsrvN94CgERaYBCnyw0fVqTz+9pGfWynbuH6T6xadTL1m/rDvWz4eqXDR3sp6Vt9A12Npz992RosJ+W1iob+izzdmP2i359pc7qvzNKB/pp7hi936HRb9ajmlDl9hurF4CVql4UUhrop7m9er+mwdgGUW8a/YYf874CWKn222/gYB+DQ/2jNmyu+bPVYX5PC795qufoVzzCn/zd9aF+rftid/a2vti9yCfEfpo9eDxW7U50NK17hkJ1u87qCNUNTAuV0T7aLvtj0NYX+6Ec65fGWCZtHgjVDU5pDdW17T5Yc81v1i6repkeDogkrq4QKPL1AUUkH+EQaMDrA4pIDuo5Eij09QFFJB/1hEDDXh9QRLKT+4lBM7vOzF4wsxd27g6efhWR3NQTAsf0+oDufqe7L3T3hdWeByAijVNPCBT6+oAiko/wk4XcvWRmfw08RXnyzD3u/mpmKxORQtT1jEF3fxx4PKO1iEgD6BmDIokrdO/A+m3doX0Av/3n6s97Hsvnr7o2VDfYNfomp6Pp3BQbZnSgpytUd3ByLMPbt8f2RkQ2rgBMDI6Tae2P9fPghqyJvYOhuua+2N6BgyfG9g7sO6n2PRyH1le/r+hIQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREEqcQEEmcQkAkcQoBkcQpBEQSpxAQSZxCQCRxhe4itOHYVKDobsBVD9wbqotOPOo7JTZqp2VvcaPgAPZ8tPqIq7HMei44EqgUe23JgZ7JobqWPbHdgBa8GTZ+ZWqornNrbLvj1I21T5CyQ9W/OB0JiCROISCSOIWASOLqGUPWY2a/MLPXzOxVM7sxy4WJSDHqOTFYAm5y99Vm1gW8aGar3P21jNYmIgUIHwm4e6+7r668vRdYj8aQiRx3MjknYGbzgXOA57P4fCJSnLpDwMwmAQ8B33D3P3q53ZGzCIcO9tfbTkQyVlcImFkL5QBY4e4Pj3adkbMIW9piT1IRkfzU89cBA+4G1rv797NbkogUqZ4jgU8BXwY+Z2YvVf5dmtG6RKQg9Qwk/RUQnPUiIuOFnjEokriCdxE6bX21z8GLzgaM7gaMzj68+Iqlobo9C2qfLQcwMC2W4TOe2R6q23L5rFCdxTYRMtwWq2s+ECvsXnMgVHfCm6EyhoLnyftn1v5je6il+kG7jgREEqcQEEmcQkAkcQoBkcQpBEQSpxAQSZxCQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREElfoLkIAn1D7SxB0bvqjly48JtHZgNHdgCsfXB6qu2TO2aG64WsvCNX1XhLbDdgU22RH565DobqDXbGXqxhuL/ZlLgaD6+x5YneobtuiE2uu8TF+3etIQCRxCgGRxCkERBKXxdyBJjNbY2aPZbEgESlWFkcCN1IeQSYix6F6h4/MA74A3JXNckSkaPUeCfwAuBmI/Q1IRBqunglElwE73P3Fo1zv/2cRDmoWoch4U+8EosvNbBPwU8qTiO4/8kofmEXYqlmEIuNNOATc/VZ3n+fu84ErgZ+7+9WZrUxECqHnCYgkLpO9A+7+S+CXWXwuESmWjgREElfsLkIbezdTNQd6ukLtWvZ6qC46GzC6G/CpbS+F6j799T8N1XX9ameojr69obLhk2eH6ibMid0OxG52Wre+F6qbt+z1UN2+i84I1c1ZsaHmmrf6BqpepiMBkcQpBEQSpxAQSZxCQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREEqcQEEmcQkAkcQoBkcSZe3DLVUDXlHl+9mdvrLnu4ORYVpWCM+lKHbG6ln2x72V7X+x1Wp/74X+E6hYv+XKobnBqW6hu98daQ3WdO2Lfl87tQ6G65n2Dobq3LpsUquteE/v69s+o/efhdw/dzv4dm0e9Y+tIQCRxCgGR
xCkERBJX7wSiKWb2oJltMLP1ZnZBVgsTkWLU+/JidwBPuvsVZtYKdGawJhEpUDgEzGwy8BngGgB3HwRip1dFpGHqeTiwANgJ3FsZTX6XmWnEkMhxpp4QaAbOBZa5+zlAP3DLkVfSLEKR8a2eENgCbHH35yvvP0g5FD5AswhFxrd6ZhG+A2w2s1MrH1oEvJbJqkSkMPX+deBvgBWVvwz8Hri2/iWJSJHqCgF3fwlYmNFaRKQB9IxBkcQVOovQhp3WPbXv8GrfXgr12/PR2InIGc9sD9X1XjIrVBedDRjdDfjkIz8O1Z37T9eH6uY80xeqc4vt5hyYE3vOmjfHdkm27Iutc3937Hewx9pVpSMBkcQpBEQSpxAQSZxCQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREEqcQEEmcQkAkcQoBkcQVuouw1DmBXWd11FxnpdiMv1nPvRuq23J5bDdg04FQGfTtDZUNLpgWqovuBlz998tCdde+/elQ3bSW2GtSdjTFZhE+dceFsX7bY/fP7me3her2nzqj5pqmg9XXqCMBkcQpBEQSpxAQSVy9swi/aWavmtk6M3vAzNqzWpiIFCMcAmY2F7gBWOjuZwJNwJVZLUxEilHvw4FmoMPMmikPI42d7hSRhqln+MhW4HvA20Av8L67r8xqYSJSjHoeDkwFllAeTDoHmGhmV49yvf+bRVg6oFmEIuNNPQ8HLgL+4O473X0IeBj45JFXGjmLsLlDswhFxpt6QuBt4Hwz6zQzozyLcH02yxKRotRzTuB5ypOIVwOvVD7XnRmtS0QKUu8swm8D385oLSLSAHrGoEjiCt1FeKgJBgIb3yZuDTYsDYfKLFZG565Dobrhk2eH6nZ/rDVUF50NGN0NeO9Jz4Xqbuo9N1T38vtzQ3XNA7HdgJ3bY7sWo/fPgWlNNdccaq4+wFBHAiKJUwiIJE4hIJI4hYBI4hQCIolTCIgkTiEgkjiFgEjiFAIiiVMIiCROISCSOIWASOIUAiKJK3QX4YRhaA+MB2ztj+3uGuiZHKobbguVcbCr+k6tsUyYU/t8RoDOHbFdi26xdUZnA0Z3A/7L7NWhuvv21D6rD+Anm2aG6t49ozNU19Ybu92nrn2v5prm/aWql+lIQCRxCgGRxCkERBJ31BAws3vMbIeZrRvxsWlmtsrM3qj8PzXfZYpIXo7lSOA+YPERH7sFeNrdTwGerrwvIseho4aAuz8LHHlOfwmwvPL2cuBLGa9LRAoSPScw0917K2+/A8T+tiIiDVf3iUF3d6DqH/I1i1BkfIuGwHYzmw1Q+X9HtStqFqHI+BYNgUeBpZW3lwKPZLMcESnasfyJ8AHgf4BTzWyLmX0N+A7weTN7g/J04u/ku0wRyctR9w64+1VVLlqU8VpEpAH0jEGRxBW6i9BK0NZX+843j216o2XPYKiu+UBsG+Fwe3ChsU2S4Rl4A3Niu946mmL9orMBo7sBrzmh6nnqMf3ngdjXN239gVBd2LbA1zekXYQiUoVCQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREEqcQEEmcQkAkcQoBkcQpBEQSpxAQSVyhuwgBbLj2mom9sd2AFtyd172m2F1hrVtrny0HUJreFarz5tguyafuuDBU1zwQuyGiswGjuwGfePwnoboz//WvQnUfuX9nqG7XF0+ruab0WHvVy3QkIJI4hYBI4hQCIomLziL8rpltMLOXzexnZjYl32WKSF6iswhXAWe6+8eB14FbM16XiBQkNIvQ3Ve6++EXLfs1MC+HtYlIAbI4J/BV4IlqF35gDNmAxpCJjDd1hYCZ3QaUgBXVrvOBMWTtGkMmMt6EnyxkZtcAlwGLKkNJReQ4FAoBM1sM3Ax81t33Z7skESlSdBbhvwFdwCoze8nMfpTzOkUkJ9FZhHfnsBYRaQA9Y1AkcYXuImwaPMSkzQM11zX31V4DsPErU0N1J7wZKmOwKzaLcN6y10N1b10zO1TXsi+2zo7tsfO/0ZmJ754Rm5kYnQ0Y3Q247oZ/D9WdVYr16wzeDtXoSEAkcQoBkcQpBEQSpxAQSZxCQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREEqcQEEmcQkAkcYXuIvQmY3BKa811B0+svQagc2tst9xQ8KUQe57YHarbd9EZobruNYdCdfu7Y9nf/ey2UB2lwABKoK23I9YvKDobMLob8JW/je0+vPgvltZc0zRQ/b6iIwGRxCkERBIXGkM24rKbzMzNbHo+yxORvEXHkGFmPcDFwNsZr0lEChQaQ1ZxO+WXHdfMAZHjWOicgJktAba6+9qM1yMiBav5T4Rm1gl8i/JDgWO5/nXAdQBtHZpgLjLeRI4ETgYWAGvNbBPlicSrzWzWaFceOYuwpVWzCEXGm5qPBNz9FWDG4fcrQbDQ3XdluC4RKUh0DJmIfEhEx5CNvHx+ZqsRkcLpGYMiiVMIiCTO3It7rs8Jk+b6eZ+4vua6fSfFdpO17ovtXuufGdtcOTQptmtxzooNobrtV5waqvPYMpm8qRSqG5jWFKqbuva9UB3bdoTKdn/xtFi/4I/Q5I37Q3UrH1pec815l2zmhbUDo97yOhIQSZxCQCRxCgGRxCkERBKnEBBJnEJAJHEKAZHEKQREEqcQEEmcQkAkcQoBkcQpBEQSpxAQSVyhuwjNbCfwVpWLpwNFvkSZ+h2//T7MX1te/T7i7t2jXVBoCIzFzF5w94Xqp37jqVcK/fRwQCRxCgGRxI2nELhT/dRvHPb60PcbN+cERKQxxtORgIg0gEJAJHEKAZHEKQREEqcQEEnc/wKh62d+C5b7vAAAAABJRU5ErkJggg==\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAQEAAAECCAYAAAD+eGJTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAASFElEQVR4nO3de2zd9XnH8c8T23Fw7s6NmoQEEIERxDWiUBBDhVKWRKR/jAk0ptAiMZWtpRUShaKt3V/r1Kql2gYd4qo2A22UDkQpJNBWCDbSJWkC5AJmIQshTuyGJA1OHN+e/XFONsfzcTjP+f1+x+H7fklRbJ/z+Pmeiz/+nePzPY+5uwCka1y9FwCgvggBIHGEAJA4QgBIHCEAJI4QABJX9xAws+vM7G0ze9fM7s651zwz+5WZbTazTWZ2R579hvRtMLPfmtlzBfSaZmZPmdlWM9tiZpfl3O/r5evyLTN7wswmZPz9HzGzTjN7a8jXWs1stZm1l/+fnnO/75avzzfM7GdmNi3PfkNOu9PM3MxmZtVvJHUNATNrkPSPkv5I0jmSbjKzc3Js2S/pTnc/R9Klkv4i535H3SFpSwF9JOmHkl5w97MlnZ9nXzM7RdJXJS1293MlNUi6MeM2j0m6btjX7pb0srufKenl8ud59lst6Vx3P0/SO5LuybmfzGyepGsl7ciw14jqfSRwiaR33X2bu/dKelLS8ryauXuHu68vf3xQpR+QU/LqJ0lmNlfSUkkP5dmn3GuqpCslPSxJ7t7r7vtzbtso6SQza5TUImlXlt/c3V+R9OGwLy+X9Hj548clfSHPfu6+yt37y5++Lmlunv3KfiDpLkm5v5qv3iFwiqT3h3y+Uzn/UB5lZgskXShpTc6t7lPpxhzMuY8knSapS9Kj5YcfD5nZxLyaufsHkr6n0m+rDkkH3H1VXv2GmOPuHeWPd0uaU0DPo74k6Rd5NjCz5ZI+cPeNefY5qt4hUBdmNknSTyV9zd1/n2OfZZI63X1dXj2GaZR0kaQH3P1CSd3K9lD5GOXH4stVCp82SRPN7Oa8+o3ES697L+S172Z2r0oPKVfm2KNF0jcl/XVePYardwh8IGnekM/nlr+WGzNrUikAVrr703n2knS5pOvNbLtKD3U+a2Y/ybHfTkk73f3o0c1TKoVCXq6R9J67d7l7n6SnJX0mx35H7TGzT0lS+f/OvBua2S2Slkn6U893w80ZKoXqxvL9Zq6k9WZ2cl4N6x0C/ynpTDM7zczGq/Sk0rN5NTMzU+nx8hZ3/35efY5y93vcfa67L1Dpsv3S3XP7TenuuyW9b2Znlb90taTNefVT6WHApWbWUr5ur1YxT4A+K2lF+eMVkp7Js5mZXafSQ7rr3f1Qnr3c/U13n+3uC8r3m52SLirftrk1res/SUtUesb1vyTdm3OvK1Q6dHxD0obyvyUFXc6rJD1XQJ8LJK0tX8Z/kzQ9535/I2mrpLck/VhSc8bf/wmVnm/oK/9A3Cpphkp/FWiX9JKk1pz7vavSc1dH7zM/yrPfsNO3S5qZ521o5UYAElXvhwMA6owQABJHCACJIwSAxBECQOLGTAiY2W30o99Y65VCvzETApIKveD0O6H7fZIvW+H9xlIIAKiDQl8sNLO1wRfMaxrxtK69A5o1o2HE097ZNiPUb6C5csb193SrcUK2G+zcKp82Wj9vjPVr7K582/X1daupaeR+g+NHWegoGg5X3gjZ19+tpsaR+w1MiP2usQoXr+/IR2pqnlSxbjB4fTb0jNxwtOtSkmwgtkG0b/LI9/eBQ91qaBnlvhm4+foOfKj+Q90jVgavrpgF85r0mxfnHf+Mw3zupi+G+h04LfgmN8FcHGiO1fXMjP1QnrzmSKju4NzxobrWzR+F6vYvjIVtQ2/shjg0e+QfruNp3Rq7Ppv294TqOq6cGqqL3M/ee7TyVhkeDgCJqykEinx/QAD5CIdAHd4fEEAOajkSKPT9AQHko5YQqNv7AwLITu5PDJrZbWa21szWdu0dyLsdgCrVEgIf6/0B3f1Bd1/s7osrvQ4AQP3UEgKFvj8ggHyEXyzk7v1m9peSXlRp8swj7r4ps5UBKERNrxh09+clPZ/RWgDUAa8YBBJX6N6Bd7bNCO0DWP3Eo6F+Sy/+f3MeP5bui6rf3yBJPdNiT3zOeTL2KGrPTYtCdS2dsQ0vHZdPDtUNBLdwNByO1bV0xS5f1wWxzR/T347d7t1tsb0RJ+0J7DUZpRVHAkDiCAEgcYQAkDhCAEgcIQAkjhAAEkcIAIkjBIDEEQJA4ggBIHGEAJA4QgBIHCEAJK7QXYQDzeNCU4GiuwF/vu6FUN0VX/nzUN3Ula+H6g7c8OlQ3bT23lDd3kWx3XKzNsQm7XReHNtGOGNTbCLQYFPsd9v0Ve2hur5Fp4bq2l4LlWnyul1V1+zcV/m+wpEAkDhCAEgcIQAkrpYxZPPM7FdmttnMNpnZHVkuDEAxanlisF/Sne6+3swmS1pnZqvdfXNGawNQgPCRgLt3uPv68scHJW0RY8iAE04mzwmY2QJJF0pak8X3A1CcmkPAzCZJ+qmkr7n770c4/X9nEfb3dNfaDkDGagoBM2tSKQBWuvvTI51n6CzCxgkTa2kHIAe1/HXAJD0saYu7fz+7JQEoUi1HApdL+jNJnzWzDeV/SzJaF4CC1DKQ9FVJgVEoAMYSXjEIJK7QXYSSRp2JVkl0NmB0N+Crf/9PobqLZn85VBedndffEpuBd6Q1VKa958R2A07dNhCq624bH6qzgdiMv8NLFobq9p0dKlPz/tiB9IH51e9a7P3nytclRwJA4ggBIHGEAJA4QgBIHCEAJI4QABJHCACJIwSAxBECQOIIASBxhACQOEIASBwhACSu0F2EbtJAYAxez7TYbrnobMDobsD1f/VAqO78v7s9VDcutjlPU96L7bLrmRHb9TYYvJf1tMb69U2K1fWcGZu1OGVtbHdl3+RQmVp2V3/7jXZf4UgASBwhACSOEAASl8XcgQYz+62ZPZfFggAUK4sjgTtUGkEG4ARU6/CRuZKWSnoom+UAKFqtRwL3SbpLUuydMgHUXS0TiJZJ6nT3dcc5H7MIgTGs1glE15vZdklPqjSJ6CfDz8QsQmBsC4eAu9/j7nPdfYGkGyX90t1vzmxlAArB6wSAxGWyd8Ddfy3p11l8LwDF4kgASFyxuwgbpZ6Z1e/wmvPkplC/Azd8OlQXnQ0Y3Q248Rv3h+quveGWUN3B+bFdb+P6orsIY3Uz3zwSqtu+tClUt/C+WD+Ni9XtPW9KqM4GA7tARynhSABIHCEAJI4QABJHCACJIwSAxBECQOIIASBxhACQOEIASBwhACSOEAASRwgAiSMEgMQVuouwsdt18prqd1ztuWlRqN+09t5QXX9LbPZhdDZgdDfgqn99LFR3yT2xWYuj7UQbzaHZwd81FtsN2LI71q99RWw44Pzn+0N1LV2xO0zzvurv1w29lXfGciQAJI4QABJHCACJq3UC0TQze8rMtprZFjO7LKuFAShGrU8M/l
DSC+7+x2Y2XlJLBmsCUKBwCJjZVElXSrpFkty9V1Ls6XgAdVPLw4HTJHVJerQ8mvwhM2PEEHCCqSUEGiVdJOkBd79QUreku4efaegswr4+ZhECY00tIbBT0k53X1P+/CmVQuEYQ2cRNjVxoACMNbXMItwt6X0zO6v8paslbc5kVQAKU+tfB74iaWX5LwPbJH2x9iUBKFJNIeDuGyQtzmgtAOqAVwwCiSt0F+HgeNPBueOrrmvpjM0G3LuoOVR3pDVUpinvxbbZRWcDRncD/uZvHwjVLb18eaju8BkzQ3UWu9nV1B3bBdq8L1YXFZopKOngqdXfXwY2V/59z5EAkDhCAEgcIQAkjhAAEkcIAIkjBIDEEQJA4ggBIHGEAJA4QgBIHCEAJI4QABJHCACJK3QXYcPhQbVu/qjquo7LYzPiZm3oCdXtPSe2q69nhoXqxvXF6qKzAaO7AX/+2jOhult3XBGqe+N3baG6q9raQ3XPvHhpqG5CcPfhoVmx38G9U6q/vwyOsnmXIwEgcYQAkDhCAEhcrbMIv25mm8zsLTN7wsxiD6YB1E04BMzsFElflbTY3c+V1CDpxqwWBqAYtT4caJR0kpk1qjSMdFftSwJQpFqGj3wg6XuSdkjqkHTA3VdltTAAxajl4cB0SctVGkzaJmmimd08wvn+bxZhP7MIgbGmlocD10h6z9273L1P0tOSPjP8TMfMImxkFiEw1tQSAjskXWpmLWZmKs0i3JLNsgAUpZbnBNaoNIl4vaQ3y9/rwYzWBaAgtc4i/Jakb2W0FgB1wCsGgcQVuotwYMI47V9Y/ZODA8HXIXZeHCucum0gVDcYvDYHG2O7CA/NjmV4dDZgdDfgw6e+Gqr7fPeyUF33QGwG5ZRtoTJN2h77q9eB06eE6lrf7q+6ZkdP5S2nHAkAiSMEgMQRAkDiCAEgcYQAkDhCAEgcIQAkjhAAEkcIAIkjBIDEEQJA4ggBIHGEAJC4QncRmksNvdUP0Gs4HOs3Y9ORUF132yiD20bR0xrbDTjzzdg6ZU2xssFYu+hswOhuwBf/4LlQ3emrbg3V+YWx3aPWH5uVOXtd7HbvnRr5sa183+RIAEgcIQAkjhAAEnfcEDCzR8ys08zeGvK1VjNbbWbt5f+n57tMAHn5OEcCj0m6btjX7pb0srufKenl8ucATkDHDQF3f0XSh8O+vFzS4+WPH5f0hYzXBaAg0ecE5rh7R/nj3ZLmZLQeAAWr+YlBd3dJFf/4f8wswiMf1doOQMaiIbDHzD4lSeX/Oyud8ZhZhM2Tgu0A5CUaAs9KWlH+eIWkZ7JZDoCifZw/ET4h6T8knWVmO83sVknfkfQ5M2tXaTrxd/JdJoC8HPdFyO5+U4WTrs54LQDqgFcMAokrdBfhYKN0aHZD1XUtXbFtb4NNsYyzgep3OkpS36TYLsLtS2O7AVt2xy5fU3f1t4EkXdXWHqqLzgaM7gbcdu3DobqLv/3lUN3sl94P1XUsmxeqi+xW7f/3yqdxJAAkjhAAEkcIAIkjBIDEEQJA4ggBIHGEAJA4QgBIHCEAJI4QABJHCACJIwSAxBECQOIK3UXY0ONq3Vr9/LWuC2K70Kaviu16O7xkYaiu58yeUN3C+2Iz6dpXxGbgNe+L7SJ85sVLQ3VTtoXKwrMBo7sB1337gVDd+SfdHqqLzOWUpIm7AvM8eyufxpEAkDhCAEgcIQAkLjqL8LtmttXM3jCzn5nZtHyXCSAv0VmEqyWd6+7nSXpH0j0ZrwtAQUKzCN19lbv3lz99XdLcHNYGoABZPCfwJUm/qHTiMWPI+rozaAcgSzWFgJndK6lf0spK5zlmDFnTxFraAchB+MVCZnaLpGWSri4PJQVwAgqFgJldJ+kuSX/o7oeyXRKAIkVnEf6DpMmSVpvZBjP7Uc7rBJCT6CzC2IgXAGMOrxgEElfoLkIbGFTT/up32k1/O7brrW/RqaG6fWeHyjRl7YRY4bjYLsL5z/cf/0wZmhDcfThpe+xPw9Yf2yUZnQ0Y3Q248Rv3h+qWXPMnobrOy2ZUXeOjjC/kSABIHCEAJI4QABJHCACJIwSAxBECQOIIASBxhACQOEIASBwhACSOEAASRwgAiSMEgMQVuouwb3KDOq6cWnVdd1vs3cvaXguVqXn/KFuuRtEX2/SmvedNCdW1dMVm9dlg7Po8NCv2O+PA6bHLN3tdbHdlx7J5obrobMDobsDnX/qXUN2Vt99Wdc24/sqXjSMBIHGEAJC40BiyIafdaWZuZjPzWR6AvEXHkMnM5km6VtKOjNcEoEChMWRlP1DpbceZOQCcwELPCZjZckkfuPvGjNcDoGBV/4nQzFokfVOlhwIf5/y3SbpNkpomT6+2HYCcRY4EzpB0mqSNZrZdpYnE683s5JHOPHQWYUMLswiBsabqIwF3f1PS7KOfl4Ngsbv/LsN1AShIdAwZgE+I6BiyoacvyGw1AArHKwaBxBECQOIK3UUokwaaqy87aU9sV9/kdbtCdQfmx2YYtuyOvW4ququveV9vqO7gqbGZib1TYrdD69uxmYm9U2N3z57W2Don7ordDpHZgFJsN6AkvXL/g1XXXPL5ys/bcyQAJI4QABJHCACJIwSAxBECQOIIASBxhACQOEIASBwhACSOEAASRwgAiSMEgMQRAkDizL24dww3sy5J/13h5JmSinyLMvqduP0+yZctr37z3X3WSCcUGgKjMbO17r6YfvQbS71S6MfDASBxhACQuLEUAtW/XQr9Uu33Sb5shfcbM88JAKiPsXQkAKAOCAEgcYQAkDhCAEgcIQAk7n8AHHtTv1Gh3LcAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] @@ -388,33 +333,6 @@ "needs_background": "light" }, "output_type": "display_data" - }, - { - "data": { - "application/javascript": [ - "\n", - " setTimeout(function() {\n", - " var nbb_cell_id = 3;\n", - " var nbb_unformatted_code = \"from brainiak.matnormal.mnrsa import MNRSA\\nfrom brainiak.matnormal.covs import CovIdentity\\nfrom sklearn.linear_model import LinearRegression\\n\\n# beta_series RSA\\nmodel_linreg = LinearRegression(fit_intercept=False)\\nmodel_linreg.fit(true_X, true_Y)\\nbeta_series = model_linreg.coef_\\nnaive_RSA = np.corrcoef(beta_series.T)\\n\\n# MN-RSA\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nmodel_matnorm = MNRSA(time_cov=time_cov, space_cov=space_cov, n_nureg=3)\\n\\nmodel_matnorm.fit(true_Y, true_X)\\n\\n# very similar on this toy data but as we show in the paper can be very different\\n# in other examples\\nplt.matshow(model_matnorm.C_)\\nplt.matshow(naive_RSA)\";\n", - " var nbb_formatted_code = \"from brainiak.matnormal.mnrsa import MNRSA\\nfrom brainiak.matnormal.covs import CovIdentity\\nfrom sklearn.linear_model import LinearRegression\\n\\n# beta_series RSA\\nmodel_linreg = LinearRegression(fit_intercept=False)\\nmodel_linreg.fit(true_X, true_Y)\\nbeta_series = model_linreg.coef_\\nnaive_RSA = np.corrcoef(beta_series.T)\\n\\n# MN-RSA\\nspace_cov = CovDiagonal(size=n_V)\\ntime_cov = CovAR1(size=n_T)\\n\\nmodel_matnorm = MNRSA(time_cov=time_cov, space_cov=space_cov, n_nureg=3)\\n\\nmodel_matnorm.fit(true_Y, true_X)\\n\\n# very similar on this toy data but as we show in the paper can be very different\\n# in other examples\\nplt.matshow(model_matnorm.C_)\\nplt.matshow(naive_RSA)\";\n", - " var nbb_cells = Jupyter.notebook.get_cells();\n", - " for (var i = 0; i < nbb_cells.length; ++i) {\n", - " if (nbb_cells[i].input_prompt_number == nbb_cell_id) {\n", - " if (nbb_cells[i].get_text() == nbb_unformatted_code) {\n", - " nbb_cells[i].set_text(nbb_formatted_code);\n", - " }\n", - " break;\n", - " }\n", - " }\n", - " }, 500);\n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" } ], "source": [ From d3c0a5a9bf1f2e7b862cad7c28a30014ccb7d5f0 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 13 Aug 2020 20:20:24 -0700 Subject: [PATCH 53/84] minor docstring cleanup --- brainiak/matnormal/covs.py | 6 ++++++ brainiak/matnormal/utils.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 743a6917f..d5253958d 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -360,6 +360,9 @@ def __init__(self, size=None, Sigma=None): @property def L(self): + """ + Cholesky factor of this covariance + """ return unflatten_cholesky_unique(self.L_flat) @property @@ -443,6 +446,9 @@ def __init__(self, size=None, invSigma=None): @property def Linv(self): + """ + Inverse of Cholesky factor of this covariance + """ return unflatten_cholesky_unique(self.Linv_flat) @property diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 259c85a01..ea5b88698 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -6,7 +6,7 @@ def rmn(rowcov, colcov): - # generate random draws from a zero-mean matrix-normal distribution + """ generate random draws from a zero-mean matrix-normal distribution """ Z = norm.rvs(size=(rowcov.shape[0], colcov.shape[0])) return cholesky(rowcov).dot(Z).dot(cholesky(colcov)) From 73d294560c3edb13328fc2a326432244cc2f295d Mon Sep 17 00:00:00 2001 From: 
Michael Shvartsman Date: Thu, 13 Aug 2020 20:33:37 -0700 Subject: [PATCH 54/84] fix the kron covs to work correctly with the new optimizer wrapper --- brainiak/matnormal/covs.py | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index d5253958d..bbe3a56f7 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -177,7 +177,9 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): tf.random.normal([1], dtype=tf.float64), name="rho_unc" ) else: - self.rho_unc = tf.Variable(scipy.special.logit(rho / 2 + 0.5), name='rho_unc') + self.rho_unc = tf.Variable( + scipy.special.logit(rho / 2 + 0.5), name="rho_unc" + ) @property def logdet(self): @@ -187,8 +189,9 @@ def logdet(self): rho = 2 * tf.sigmoid(self.rho_unc) - 1 # now compute logdet return tf.reduce_sum( - input_tensor=2 * tf.constant(self.run_sizes, dtype=tf.float64) * - self.log_sigma + input_tensor=2 + * tf.constant(self.run_sizes, dtype=tf.float64) + * self.log_sigma - tf.math.log(1 - tf.square(rho)) ) @@ -442,7 +445,9 @@ def __init__(self, size=None, invSigma=None): else: Linv = np.linalg.cholesky(invSigma) - self.Linv_flat = tf.Variable(flatten_cholesky_unique(Linv), name="Linv_flat") + self.Linv_flat = tf.Variable( + flatten_cholesky_unique(Linv), name="Linv_flat" + ) @property def Linv(self): @@ -512,33 +517,32 @@ def __init__(self, sizes, Sigmas=None, mask=None): self.nfactors = len(sizes) self.size = np.prod(np.array(sizes), dtype=np.int32) + npar = [(size * (size + 1)) // 2 for size in self.sizes] if Sigmas is None: - self.L_full = [ + self.Lflat = [ tf.Variable( - tf.random.normal([sizes[i], sizes[i]], dtype=tf.float64), - name="L" + str(i) + "_full", + tf.random.normal([npar[i]], dtype=tf.float64), + name="L" + str(i) + "_flat", ) for i in range(self.nfactors) ] else: - self.L_full = [ - tf.Variable(np.linalg.cholesky(Sigmas[i]), name="L" + str(i) + "_full") + self.Lflat = [ + tf.Variable(flatten_cholesky_unique(np.linalg.cholesky(Sigmas[i])), name="L" + str(i) + "_flat") for i in range(self.nfactors) ] self.mask = mask - # make a list of choleskys - L_indeterminate = [tf.linalg.band_part(mat, -1, 0) for mat in self.L_full] - self.L = [ - tf.linalg.set_diag(mat, tf.exp(tf.linalg.diag_part(mat))) - for mat in L_indeterminate - ] + + @property + def L(self): + return [unflatten_cholesky_unique(mat) for mat in self.Lflat] def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit this covariance """ - return self.L_full + return self.Lflat @property def logdet(self): From 98607eacabe301b3788224fec6f60c0b83c12c18 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 13 Aug 2020 20:56:08 -0700 Subject: [PATCH 55/84] autoformat --- brainiak/matnormal/covs.py | 52 +++++---- brainiak/matnormal/matnormal_likelihoods.py | 111 ++++++++++++-------- brainiak/matnormal/mnrsa.py | 17 +-- brainiak/matnormal/regression.py | 18 ++-- brainiak/matnormal/utils.py | 9 +- brainiak/utils/kronecker_solvers.py | 33 ++++-- 6 files changed, 147 insertions(+), 93 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index bbe3a56f7..2022e2783 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -146,9 +146,11 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): if scan_onsets is None: self.run_sizes = [size] self.offdiag_template = tf.constant( - scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)]), 
dtype=tf.float64 + scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)]), + dtype=tf.float64 ) - self.diag_template = tf.constant(np.diag(np.r_[0, np.ones(size - 2), 0])) + self.diag_template = tf.constant( + np.diag(np.r_[0, np.ones(size - 2), 0])) else: self.run_sizes = np.ediff1d(np.r_[scan_onsets, size]) sub_offdiags = [ @@ -158,7 +160,8 @@ def __init__(self, size, rho=None, sigma=None, scan_onsets=None): self.offdiag_template = tf.constant( scipy.sparse.block_diag(sub_offdiags).toarray() ) - subdiags = [np.diag(np.r_[0, np.ones(r - 2), 0]) for r in self.run_sizes] + subdiags = [np.diag(np.r_[0, np.ones(r - 2), 0]) + for r in self.run_sizes] self.diag_template = tf.constant( scipy.sparse.block_diag(subdiags).toarray() ) @@ -289,7 +292,8 @@ def __init__(self, size, diag_var=None): tf.random.normal([size], dtype=tf.float64), name="precisions" ) else: - self.logprec = tf.Variable(np.log(1 / diag_var), name="log-precisions") + self.logprec = tf.Variable( + np.log(1 / diag_var), name="log-precisions") @property def logdet(self): @@ -327,7 +331,8 @@ def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10): scale=tf.constant(beta, dtype=tf.float64), ) - self.logp = tf.reduce_sum(input_tensor=self.ig.log_prob(tf.exp(self.logprec))) + self.logp = tf.reduce_sum( + input_tensor=self.ig.log_prob(tf.exp(self.logprec))) class CovUnconstrainedCholesky(CovBase): @@ -357,7 +362,8 @@ def __init__(self, size=None, Sigma=None): else: L = np.linalg.cholesky(Sigma) - self.L_flat = tf.Variable(flatten_cholesky_unique(L), name="L_flat") + self.L_flat = tf.Variable( + flatten_cholesky_unique(L), name="L_flat") self.optimize_vars = [self.L_flat] @@ -370,7 +376,8 @@ def L(self): @property def logdet(self): - return 2 * tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.diag_part(self.L))) + return 2 * tf.reduce_sum(input_tensor=tf.math.log( + tf.linalg.diag_part(self.L))) def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit @@ -425,10 +432,12 @@ class CovUnconstrainedInvCholesky(CovBase): def __init__(self, size=None, invSigma=None): if size is None and invSigma is None: - raise RuntimeError("Must pass either invSigma or size but not both") + raise RuntimeError( + "Must pass either invSigma or size but not both") if size is not None and invSigma is not None: - raise RuntimeError("Must pass either invSigma or size but not both") + raise RuntimeError( + "Must pass either invSigma or size but not both") if invSigma is not None: size = invSigma.shape[0] @@ -528,12 +537,14 @@ def __init__(self, sizes, Sigmas=None, mask=None): ] else: self.Lflat = [ - tf.Variable(flatten_cholesky_unique(np.linalg.cholesky(Sigmas[i])), name="L" + str(i) + "_flat") + tf.Variable( + flatten_cholesky_unique(np.linalg.cholesky(Sigmas[i])), + name="L" + str(i) + "_flat", + ) for i in range(self.nfactors) ] self.mask = mask - @property def L(self): return [unflatten_cholesky_unique(mat) for mat in self.Lflat] @@ -550,18 +561,21 @@ def logdet(self): """ if self.mask is None: n_list = tf.stack( - [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in self.L] + [tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) + for mat in self.L] ) n_prod = tf.reduce_prod(input_tensor=n_list) logdet = tf.stack( [ tf.reduce_sum( - input_tensor=tf.math.log(tf.linalg.tensor_diag_part(mat)) + input_tensor=tf.math.log( + tf.linalg.tensor_diag_part(mat)) ) for mat in self.L ] ) - logdetfinal = tf.reduce_sum(input_tensor=(logdet * n_prod) / n_list) + logdetfinal = tf.reduce_sum( + input_tensor=(logdet * n_prod) / 
n_list) else: n_list = [tf.shape(input=mat)[0] for mat in self.L] mask_reshaped = tf.reshape(self.mask, n_list) @@ -569,10 +583,12 @@ def logdet(self): for i in range(self.nfactors): indices = list(range(self.nfactors)) indices.remove(i) - logdet += tf.math.log(tf.linalg.tensor_diag_part(self.L[i])) * tf.cast( - tf.reduce_sum(input_tensor=mask_reshaped, axis=indices), - dtype=tf.float64, - ) + logdet += (tf.math.log(tf.linalg.tensor_diag_part(self.L[i])) * + tf.cast( + tf.reduce_sum( + input_tensor=mask_reshaped, axis=indices), + dtype=tf.float64, + )) logdetfinal = tf.reduce_sum(input_tensor=logdet) return 2.0 * logdetfinal diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index e4834229d..cc89fcb8c 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -54,15 +54,18 @@ def solve_det_marginal(x, sigma, A, Q): # of things we invert. This includes Q and Sigma, as well # as the "lemma factor" for lack of a better definition logging.log(logging.DEBUG, "Printing diagnostics for solve_det_marginal") + lemma_cond = _condition( + Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True)) logging.log( logging.DEBUG, - f"lemma_factor condition={_condition(Q._prec + tf.matmul(A, sigma.solve(A),transpose_a=True))}", + f"lemma_factor condition={lemma_cond}", ) logging.log(logging.DEBUG, f"Q condition={_condition(Q._cov)}") logging.log(logging.DEBUG, f"sigma condition={_condition(sigma._cov)}") logging.log( logging.DEBUG, - f"sigma max={tf.reduce_max(input_tensor=A)}, sigma min={tf.reduce_min(input_tensor=A)}", + f"sigma max={tf.reduce_max(input_tensor=A)}," + + "sigma min={tf.reduce_min(input_tensor=A)}", ) # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like @@ -75,14 +78,19 @@ def solve_det_marginal(x, sigma, A, Q): logdet = ( Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor))) + + 2 * + tf.reduce_sum(input_tensor=tf.math.log( + tlinalg.diag_part(lemma_factor))) ) logging.log(logging.DEBUG, f"Log-determinant of Q={Q.logdet}") logging.log(logging.DEBUG, f"sigma logdet={sigma.logdet}") + lemma_logdet = 2 * \ + tf.reduce_sum(input_tensor=tf.math.log( + tlinalg.diag_part(lemma_factor))) logging.log( logging.DEBUG, - f"lemma factor logdet={2* tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor)))}", + f"lemma factor logdet={lemma_logdet}", ) # A' Sigma^{-1} @@ -132,7 +140,9 @@ def solve_det_conditional(x, sigma, A, Q): logdet = ( -Q.logdet + sigma.logdet - + 2 * tf.reduce_sum(input_tensor=tf.math.log(tlinalg.diag_part(lemma_factor))) + + 2 * + tf.reduce_sum(input_tensor=tf.math.log( + tlinalg.diag_part(lemma_factor))) ) # A' Sigma^{-1} @@ -170,13 +180,16 @@ def _mnorm_logp_internal( """ log2pi = 1.8378770664093453 - logging.log(logging.DEBUG, f"column precision trace ={tlinalg.trace(solve_col)}") - logging.log(logging.DEBUG, f"row precision trace ={tlinalg.trace(solve_row)}") + logging.log(logging.DEBUG, + f"column precision trace ={tlinalg.trace(solve_col)}") + logging.log(logging.DEBUG, + f"row precision trace ={tlinalg.trace(solve_row)}") logging.log(logging.DEBUG, f"row cov logdet ={logdet_row}") logging.log(logging.DEBUG, f"col cov logdet ={logdet_col}") denominator = ( - -rowsize * colsize * log2pi - colsize * logdet_row - rowsize * logdet_col + -rowsize * colsize * log2pi - colsize * logdet_row - + rowsize * logdet_col ) numerator = -tlinalg.trace(tf.matmul(solve_col, solve_row)) return 0.5 * (numerator + 
denominator) @@ -234,9 +247,9 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): col_cov: CovBase Column Covariance implementing the CovBase API (:math:`C` above). marg: tf.Tensor - Marginal factor (:math:`A` above). + Marginal factor (:math:`A` above). marg_cov: CovBase - Prior covariance implementing the CovBase API (:math:`Q` above). + Prior covariance implementing the CovBase API (:math:`Q` above). """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") colsize = tf.cast(tf.shape(input=x)[1], "float64") @@ -272,9 +285,9 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): col_cov: CovBase Column Covariance implementing the CovBase API (:math:`C` above). marg: tf.Tensor - Marginal factor (:math:`A` above). + Marginal factor (:math:`A` above). marg_cov: CovBase - Prior covariance implementing the CovBase API (:math:`Q` above). + Prior covariance implementing the CovBase API (:math:`Q` above). """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") @@ -298,36 +311,40 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): Consider the following partitioned matrix-normal density: .. math:: - \begin{bmatrix} - \operatorname{vec}\left[\mathbf{X}_{i j}\right] \\ - \operatorname{vec}\left[\mathbf{Y}_{i k}\right] - \end{bmatrix} \sim \mathcal{N}\left(0,\begin{bmatrix} - \Sigma_{j} \otimes \Sigma_{i} & \Sigma_{j k} \otimes \Sigma_{i} \\ - \Sigma_{k j} \otimes \Sigma_{i} & \Sigma_{k} \otimes \Sigma_{i} - \end{bmatrix}\right) - - Then we can write the conditional: + \\begin{bmatrix} + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] + \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} + \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} + \\end{bmatrix}\\right) + + Then we can write the conditional: .. math :: - \mathbf{X}^{\top} j i \mid \mathbf{Y}_{k i}^{\top} \sim \mathcal{M}\\ - \mathcal{N}\left(0, \Sigma_{j}-\Sigma_{j k} \Sigma_{k}^{-1} \Sigma_{k j},\\ - \Sigma_{i}\right) + \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} + \\sim \\mathcal{M}\\ + \\mathcal{N}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1} + \\Sigma_{k j},\\ + \\Sigma_{i}\\right) This function efficiently computes the conditionals by unpacking some info in the covariance classes and then dispatching to - solve_det_conditional. + solve_det_conditional. Parameters --------------- x: tf.Tensor Observation tensor row_cov: CovBase - Row covariance (:math:`\Sigma_{i}` in the notation above). + Row covariance (:math:`\\Sigma_{i}` in the notation above). col_cov: CovBase - Column covariance (:math:`\Sigma_{j}` in the notation above). + Column covariance (:math:`\\Sigma_{j}` in the notation above). cond: tf.Tensor - Off-diagonal block of the partitioned covariance (:math:`\Sigma_{jk}` in the notation above). + Off-diagonal block of the partitioned covariance (:math:`\\Sigma_{jk}` + in the notation above). cond_cov: CovBase - Covariance of conditioning variable (:math:`\Sigma_{k}` in the notation above). + Covariance of conditioning variable (:math:`\\Sigma_{k}` in the + notation above). """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") @@ -349,36 +366,38 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): Consider the following partitioned matrix-normal density: .. 
math:: - \begin{bmatrix} - \operatorname{vec}\left[\mathbf{X}_{i j}\right] \\ - \operatorname{vec}\left[\mathbf{Y}_{i k}\right] - \end{bmatrix} \sim \mathcal{N}\left(0,\begin{bmatrix} - \Sigma_{j} \otimes \Sigma_{i} & \Sigma_{j k} \otimes \Sigma_{i} \\ - \Sigma_{k j} \otimes \Sigma_{i} & \Sigma_{k} \otimes \Sigma_{i} - \end{bmatrix}\right) - - Then we can write the conditional: + \\begin{bmatrix} + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] + \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} + \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} + \\end{bmatrix}\\right) + + Then we can write the conditional: .. math :: - \mathbf{X}_{i j} \mid \mathbf{Y}_{i k} \sim \mathcal{M}\\ - \mathcal{N}\left(0, \Sigma_{i}, \Sigma_{j}-\Sigma_{j k}\\ - \Sigma_{k}^{-1} \Sigma_{k j}\right) + \\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\ + \\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\ + \\Sigma_{k}^{-1} \\Sigma_{k j}\\right) This function efficiently computes the conditionals by unpacking some info in the covariance classes and then dispatching to - solve_det_conditional. + solve_det_conditional. Parameters --------------- x: tf.Tensor Observation tensor row_cov: CovBase - Row covariance (:math:`\Sigma_{i}` in the notation above). + Row covariance (:math:`\\Sigma_{i}` in the notation above). col_cov: CovBase - Column covariance (:math:`\Sigma_{j}` in the notation above). + Column covariance (:math:`\\Sigma_{j}` in the notation above). cond: tf.Tensor - Off-diagonal block of the partitioned covariance (:math:`\Sigma_{jk}` in the notation above). + Off-diagonal block of the partitioned covariance (:math:`\\Sigma_{jk}` + in the notation above). cond_cov: CovBase - Covariance of conditioning variable (:math:`\Sigma_{k}` in the notation above). + Covariance of conditioning variable (:math:`\\Sigma_{k}` in the + notation above). """ rowsize = tf.cast(tf.shape(input=x)[0], "float64") diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index d3e6d4354..b906a8a3f 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -13,7 +13,6 @@ flatten_cholesky_unique, ) -import tensorflow.compat.v1.logging as tflog from scipy.optimize import minimize __all__ = ["MNRSA"] @@ -62,7 +61,8 @@ class MNRSA(BaseEstimator): """ def __init__( - self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", optCtrl=None + self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B", + optCtrl=None ): self.n_T = time_cov.size @@ -86,7 +86,7 @@ def __init__( @property def L(self): """ - Cholesky factor of the RSA matrix. + Cholesky factor of the RSA matrix. 
""" return unflatten_cholesky_unique(self.L_flat) @@ -124,7 +124,8 @@ def fit(self, X, y, naive_init=True): self.naive_U_ = np.cov(m.coef_.T) naiveRSA_L = np.linalg.cholesky(self.naive_U_) self.L_flat = tf.Variable( - flatten_cholesky_unique(naiveRSA_L), name="L_flat", dtype="float64" + flatten_cholesky_unique(naiveRSA_L), name="L_flat", + dtype="float64" ) else: chol_flat_size = (self.n_c * (self.n_c + 1)) // 2 @@ -136,14 +137,16 @@ def fit(self, X, y, naive_init=True): self.train_variables.extend([self.L_flat]) - lossfn = lambda theta: -self.logp(X, Y) + def lossfn(theta): return -self.logp(X, Y) val_and_grad = make_val_and_grad(lossfn, self.train_variables) x0 = pack_trainable_vars(self.train_variables) - opt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method="L-BFGS-B") + opt_results = minimize(fun=val_and_grad, x0=x0, + jac=True, method="L-BFGS-B") - unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) + unpacked_theta = unpack_trainable_vars( + opt_results.x, self.train_variables) for var, val in zip(self.train_variables, unpacked_theta): var.assign(val) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 7b232c7b3..bac8263b2 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -1,5 +1,4 @@ import tensorflow as tf -import tensorflow_probability as tfp import numpy as np from sklearn.base import BaseEstimator from brainiak.matnormal.matnormal_likelihoods import matnorm_logp @@ -34,7 +33,8 @@ class MatnormalRegression(BaseEstimator): """ - def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", optCtrl=None): + def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", + optCtrl=None): self.optCtrl, self.optMethod = optCtrl, optimizer self.time_cov = time_cov @@ -69,7 +69,8 @@ def fit(self, X, y, naive_init=True): sigma_inv_x = self.time_cov.solve(X) sigma_inv_y = self.time_cov.solve(y) - beta_init = np.linalg.solve((X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) + beta_init = np.linalg.solve( + (X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y)) else: beta_init = np.random.randn(self.n_c, self.n_v) @@ -80,13 +81,15 @@ def fit(self, X, y, naive_init=True): self.train_variables.extend(self.time_cov.get_optimize_vars()) self.train_variables.extend(self.space_cov.get_optimize_vars()) - lossfn = lambda theta: -self.logp(X, y) + def lossfn(theta): return -self.logp(X, y) val_and_grad = make_val_and_grad(lossfn, self.train_variables) x0 = pack_trainable_vars(self.train_variables) - opt_results = minimize(fun=val_and_grad, x0=x0, jac=True, method="L-BFGS-B") + opt_results = minimize(fun=val_and_grad, x0=x0, + jac=True, method="L-BFGS-B") - unpacked_theta = unpack_trainable_vars(opt_results.x, self.train_variables) + unpacked_theta = unpack_trainable_vars( + opt_results.x, self.train_variables) for var, val in zip(self.train_variables, unpacked_theta): var.assign(val) @@ -129,7 +132,8 @@ def calibrate(self, Y): # Y Sigma_s^{-1} B' Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) # (B Sigma_s^{-1} B')^{-1} - B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp).eval(session=self.sess) + B_Sigma_Btrp = tf.matmul( + self.beta, Sigma_s_btrp).eval(session=self.sess) X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index ea5b88698..366a8692f 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -49,9 +49,9 @@ def flatten_cholesky_unique(L): def 
unflatten_cholesky_unique(L_flat): """ - Converts a vector of elements into a triangular matrix + Converts a vector of elements into a triangular matrix (Cholesky factor). Exponentiates diagonal to make - parameterizaation unique. Inverse of flatten_cholesky_unique. + parameterizaation unique. Inverse of flatten_cholesky_unique. """ L = tfp.math.fill_triangular(L_flat) # exp diag for unique parameterization @@ -69,14 +69,15 @@ def pack_trainable_vars(trainable_vars): def unpack_trainable_vars(x, trainable_vars): """ - Unpack trainable vars from a single vector as + Unpack trainable vars from a single vector as used/returned by scipy.optimize """ sizes = [tv.shape for tv in trainable_vars] idxs = [np.prod(sz) for sz in sizes] flatvars = tf.split(x, idxs) - return [tf.reshape(fv, tv.shape) for fv, tv in zip(flatvars, trainable_vars)] + return [tf.reshape(fv, tv.shape) for fv, tv in zip(flatvars, + trainable_vars)] def make_val_and_grad(lossfn, train_vars): diff --git a/brainiak/utils/kronecker_solvers.py b/brainiak/utils/kronecker_solvers.py index d39be648d..7ed9a7c9d 100644 --- a/brainiak/utils/kronecker_solvers.py +++ b/brainiak/utils/kronecker_solvers.py @@ -41,7 +41,8 @@ def tf_solve_lower_triangular_kron(L, y): t = xinb / L[0][i, i] xinb = tf_solve_lower_triangular_kron(L[1:], t) xina = xina - tf.reshape( - tf.tile(tf.slice(L[0], [i + 1, i], [na - i - 1, 1]), [1, nb * col]), + tf.tile(tf.slice(L[0], [i + 1, i], + [na - i - 1, 1]), [1, nb * col]), [(na - i - 1) * nb, col], ) * tf.reshape( tf.tile(tf.reshape(t, [-1, 1]), [na - i - 1, 1]), @@ -90,9 +91,11 @@ def tf_solve_upper_triangular_kron(L, y): t = xinb / L[0][i, i] xinb = tf_solve_upper_triangular_kron(L[1:], t) xt = xt - tf.reshape( - tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), [1, nb * col]), + tf.tile(tf.transpose(a=tf.slice( + L[0], [i, 0], [1, i])), [1, nb * col]), [i * nb, col], - ) * tf.reshape(tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), [i * nb, col]) + ) * tf.reshape(tf.tile(tf.reshape(t, [-1, 1]), [i, 1]), + [i * nb, col]) x = tf.concat(axis=0, values=[xt, xinb, xina]) return x @@ -176,19 +179,23 @@ def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False): zero = tf.constant(0, dtype=tf.int32) mask_mat = tf.compat.v1.where( tf.not_equal( - tf.matmul(tf.reshape(mask, [-1, 1]), tf.reshape(mask, [1, -1])), zero + tf.matmul(tf.reshape(mask, [-1, 1]), + tf.reshape(mask, [1, -1])), zero ) ) q = tf.cast( - tf.sqrt(tf.cast(tf.shape(input=mask_mat)[0], dtype=tf.float64)), dtype=tf.int32 + tf.sqrt(tf.cast(tf.shape(input=mask_mat)[0], dtype=tf.float64)), + dtype=tf.int32 ) L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q]) maskindex = tf.compat.v1.where(tf.not_equal(mask, zero)) y_masked = tf.gather_nd(y, maskindex) - x_s1 = tf.linalg.triangular_solve(L_masked, y_masked, lower=lower, adjoint=adjoint) - x = tf.scatter_nd(maskindex, x_s1, tf.cast(tf.shape(input=y), dtype=tf.int64)) + x_s1 = tf.linalg.triangular_solve( + L_masked, y_masked, lower=lower, adjoint=adjoint) + x = tf.scatter_nd(maskindex, x_s1, tf.cast( + tf.shape(input=y), dtype=tf.int64)) return x @@ -220,7 +227,8 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): """ n = len(L) if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=False) + return tf_masked_triangular_solve(L[0], y, mask, lower=True, + adjoint=False) else: x = y na = L[0].get_shape().as_list()[0] @@ -245,7 +253,8 @@ def tf_solve_lower_triangular_masked_kron(L, y, mask): xinb = tf_solve_lower_triangular_kron(L[1:], t) t_masked = t xina = xina - 
tf.reshape( - tf.tile(tf.slice(L[0], [i + 1, i], [na - i - 1, 1]), [1, nb * col]), + tf.tile(tf.slice(L[0], [i + 1, i], + [na - i - 1, 1]), [1, nb * col]), [(na - i - 1) * nb, col], ) * tf.reshape( tf.tile(tf.reshape(t_masked, [-1, 1]), [na - i - 1, 1]), @@ -284,7 +293,8 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): """ n = len(L) if n == 1: - return tf_masked_triangular_solve(L[0], y, mask, lower=True, adjoint=True) + return tf_masked_triangular_solve(L[0], y, mask, lower=True, + adjoint=True) else: x = y na = L[0].get_shape().as_list()[0] @@ -309,7 +319,8 @@ def tf_solve_upper_triangular_masked_kron(L, y, mask): t_masked = t xt = xt - tf.reshape( - tf.tile(tf.transpose(a=tf.slice(L[0], [i, 0], [1, i])), [1, nb * col]), + tf.tile(tf.transpose(a=tf.slice( + L[0], [i, 0], [1, i])), [1, nb * col]), [i * nb, col], ) * tf.reshape( tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), [i * nb, col] From 4fcf5570f7bf51e65683f0da7ca9d359862bd074 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 13 Aug 2020 21:01:10 -0700 Subject: [PATCH 56/84] correctly pass optimizer args --- brainiak/matnormal/mnrsa.py | 8 +++++--- brainiak/matnormal/regression.py | 7 +++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index b906a8a3f..c9c2d71c9 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -56,7 +56,7 @@ class MNRSA(BaseEstimator): optimizer : string, Default :'L-BFGS' Name of scipy optimizer to use. optCtrl : dict, default: None - Dict of options for optimizer (e.g. {'maxiter': 100}) + Additional arguments to pass to scipy.optimize.minimize. """ @@ -69,7 +69,9 @@ def __init__( self.n_V = space_cov.size self.n_nureg = n_nureg - self.optCtrl, self.optMethod = optCtrl, optimizer + self.optMethod = optimizer + if optCtrl is None: + self.optCtrl = {} self.X_0 = tf.Variable( tf.random.normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0" @@ -143,7 +145,7 @@ def lossfn(theta): return -self.logp(X, Y) x0 = pack_trainable_vars(self.train_variables) opt_results = minimize(fun=val_and_grad, x0=x0, - jac=True, method="L-BFGS-B") + jac=True, method=self.optMethod, **self.optCtrl) unpacked_theta = unpack_trainable_vars( opt_results.x, self.train_variables) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index bac8263b2..d0b43e1e4 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -36,7 +36,10 @@ class MatnormalRegression(BaseEstimator): def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", optCtrl=None): - self.optCtrl, self.optMethod = optCtrl, optimizer + self.optMethod = optimizer + if optCtrl is None: + self.optCtrl = {} + self.time_cov = time_cov self.space_cov = space_cov @@ -86,7 +89,7 @@ def lossfn(theta): return -self.logp(X, y) x0 = pack_trainable_vars(self.train_variables) opt_results = minimize(fun=val_and_grad, x0=x0, - jac=True, method="L-BFGS-B") + jac=True, method=self.optMethod, **self.optCtrl) unpacked_theta = unpack_trainable_vars( opt_results.x, self.train_variables) From a1fe394524b9cf25f69cf8971820fdfb295c30ee Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 13 Aug 2020 21:06:04 -0700 Subject: [PATCH 57/84] Make test linter happy --- tests/matnormal/test_cov.py | 10 ++++++---- tests/matnormal/test_matnormal_logp.py | 6 ++++-- tests/matnormal/test_matnormal_logp_conditional.py | 6 ++++-- tests/matnormal/test_matnormal_logp_marginal.py | 6 ++++-- 
tests/matnormal/test_matnormal_regression.py | 4 ++-- tests/matnormal/test_matnormal_utils.py | 3 ++- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 515d6c96a..278939e83 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -133,7 +133,8 @@ def test_CovDiagonal_initialized(): def test_CovDiagonalGammaPrior(): cov_np = np.diag(np.exp(np.random.normal(size=m))) - cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, beta=1e-10) + cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, + beta=1e-10) ig = invgamma(1.5, scale=1e-10) @@ -248,7 +249,8 @@ def test_Cov3FactorMaskedKron(): L1 = (cov.L[0]).numpy() L2 = (cov.L[1]).numpy() L3 = (cov.L[2]).numpy() - cov_np_factor = np.kron(L1, np.kron(L2, L3))[np.ix_(mask_indices, mask_indices)] + cov_np_factor = np.kron(L1, np.kron(L2, L3))[ + np.ix_(mask_indices, mask_indices)] cov_np = np.dot(cov_np_factor, cov_np_factor.transpose()) logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices, :], cov_np) @@ -260,7 +262,8 @@ def test_Cov3FactorMaskedKron(): atol=atol, ) assert_allclose( - sinvx_np, cov.solve(X_tf).numpy()[mask_indices, :], rtol=rtol, atol=atol + sinvx_np, cov.solve(X_tf).numpy()[ + mask_indices, :], rtol=rtol, atol=atol ) @@ -285,4 +288,3 @@ def test_CovAR1_scan_onsets(): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) - diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index 8e9572e12..70a083753 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -28,7 +28,8 @@ def test_against_scipy_mvn_row(): rowcov_np = rowcov._cov - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp(X_tf, rowcov, colcov) assert_allclose(scipy_answer, tf_answer, rtol=rtol) @@ -42,6 +43,7 @@ def test_against_scipy_mvn_col(): colcov_np = colcov._cov - scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp(X_tf, rowcov, colcov) assert_allclose(scipy_answer, tf_answer, rtol=rtol) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 5bc7927ee..9f479e5b2 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -45,7 +45,8 @@ def test_against_scipy_mvn_row_conditional(): rowcov_np = rowcov._cov - A.dot(np.linalg.inv(Q_np)).dot((A.T)) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer, rtol=rtol) @@ -72,7 +73,8 @@ def test_against_scipy_mvn_col_conditional(): colcov_np = colcov._cov - A.T.dot(np.linalg.inv(Q_np)).dot((A)) - scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q) diff --git 
a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index fb95edc43..b5bcb6e87 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -39,7 +39,8 @@ def test_against_scipy_mvn_row_marginal(): rowcov_np = rowcov._cov + A.dot(Q_np).dot(A.T) - scipy_answer = np.sum(multivariate_normal.logpdf(X.T, np.zeros([m]), rowcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X.T, np.zeros([m]), rowcov_np)) tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer, rtol=rtol) @@ -61,7 +62,8 @@ def test_against_scipy_mvn_col_marginal(): colcov_np = colcov._cov + A.T.dot(Q_np).dot(A) - scipy_answer = np.sum(multivariate_normal.logpdf(X, np.zeros([n]), colcov_np)) + scipy_answer = np.sum(multivariate_normal.logpdf( + X, np.zeros([n]), colcov_np)) tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, A_tf, Q) assert_allclose(scipy_answer, tf_answer, rtol=rtol) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 7ea32ec49..0af84f14a 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -1,6 +1,5 @@ import numpy as np from scipy.stats import norm, wishart, pearsonr -import tensorflow as tf from brainiak.matnormal.covs import ( CovIdentity, @@ -80,7 +79,8 @@ def test_matnorm_regression_optimizerChoice(): row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedInvCholesky(size=p) - model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, optimizer="CG") + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov, + optimizer="CG") model.fit(X, Y, naive_init=False) diff --git a/tests/matnormal/test_matnormal_utils.py b/tests/matnormal/test_matnormal_utils.py index c52ccaf3e..46fa2c691 100644 --- a/tests/matnormal/test_matnormal_utils.py +++ b/tests/matnormal/test_matnormal_utils.py @@ -5,7 +5,8 @@ def test_pack_unpack(): shapes = [[2, 3], [3], [3, 4, 2], [1, 5]] - mats = [tf.random.stateless_normal(shape=shape, seed=[0, 0]) for shape in shapes] + mats = [tf.random.stateless_normal( + shape=shape, seed=[0, 0]) for shape in shapes] flatmats = pack_trainable_vars(mats) unflatmats = unpack_trainable_vars(flatmats, mats) for mat_in, mat_out in zip(mats, unflatmats): From 495c656f5b448bb75b20a3a9c6111ad0c0d7ea79 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 13 Aug 2020 23:00:55 -0700 Subject: [PATCH 58/84] maybe this will make travis use a recent TF? 
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 16ded548c..29f72781e 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,7 @@ def finalize_options(self): 'psutil', 'nibabel', 'typing', - 'tensorflow', + 'tensorflow>=2.3', # required for tensorflow_probability 'tensorflow_probability', 'joblib', 'wheel', # See https://github.com/astropy/astropy-helpers/issues/501 From c9a568827441607811a527eecc09c652aa71685a Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 14 Aug 2020 22:29:12 -0700 Subject: [PATCH 59/84] workaround to be able to use pymanopt (for theano) in the presence of TF>1 --- brainiak/funcalign/sssrm.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/brainiak/funcalign/sssrm.py b/brainiak/funcalign/sssrm.py index 1fe3bd073..b43e94288 100644 --- a/brainiak/funcalign/sssrm.py +++ b/brainiak/funcalign/sssrm.py @@ -40,6 +40,8 @@ from pymanopt.solvers import ConjugateGradient from pymanopt import Problem from pymanopt.manifolds import Stiefel +import pymanopt + import gc from brainiak.utils import utils @@ -57,6 +59,10 @@ # https://github.com/pymc-devs/pymc3/pull/3767 theano.config.gcc.cxxflags = "-Wno-c++11-narrowing" +# FIXME workaround for pymanopt only working with tensorflow 1. +# We don't use pymanopt+TF so we just let pymanopt pretend TF doesn't exist. +pymanopt.tools.autodiff._tensorflow.tf = None + class SSSRM(BaseEstimator, ClassifierMixin, TransformerMixin): """Semi-Supervised Shared Response Model (SS-SRM) From 570f4a2d9f762809b85d91e58f8332809dfccfac Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 21 Aug 2020 21:34:08 -0700 Subject: [PATCH 60/84] doc build fixes --- brainiak/matnormal/matnormal_likelihoods.py | 44 ++++++++++----------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index cc89fcb8c..39e005a1b 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -311,21 +311,21 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): Consider the following partitioned matrix-normal density: .. math:: - \\begin{bmatrix} - \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ - \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] - \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} - \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ - \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} - \\end{bmatrix}\\right) + \\begin{bmatrix} + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] + \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} + \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} + \\end{bmatrix}\\right) Then we can write the conditional: .. 
math :: - \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} - \\sim \\mathcal{M}\\ - \\mathcal{N}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1} - \\Sigma_{k j},\\ - \\Sigma_{i}\\right) + \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} + \\sim \\mathcal{M}\\ + \\mathcal{N}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1} + \\Sigma_{k j},\\ + \\Sigma_{i}\\right) This function efficiently computes the conditionals by unpacking some info in the covariance classes and then dispatching to @@ -366,19 +366,19 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): Consider the following partitioned matrix-normal density: .. math:: - \\begin{bmatrix} - \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ - \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] - \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} - \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ - \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} - \\end{bmatrix}\\right) + \\begin{bmatrix} + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] + \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} + \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} + \\end{bmatrix}\\right) Then we can write the conditional: .. math :: - \\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\ - \\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\ - \\Sigma_{k}^{-1} \\Sigma_{k j}\\right) + \\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\ + \\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\ + \\Sigma_{k}^{-1} \\Sigma_{k j}\\right) This function efficiently computes the conditionals by unpacking some info in the covariance classes and then dispatching to From 5f04a82daa8ef5de034428fd1d38aed7cf5aca9d Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 21 Aug 2020 22:04:38 -0700 Subject: [PATCH 61/84] doc cleanup and removal of unused functions --- brainiak/matnormal/__init__.py | 23 +++--- brainiak/matnormal/covs.py | 44 +++++++---- brainiak/matnormal/matnormal_likelihoods.py | 86 ++++++++++++--------- brainiak/matnormal/mnrsa.py | 2 +- brainiak/matnormal/regression.py | 2 +- brainiak/matnormal/utils.py | 54 +++++++++---- 6 files changed, 127 insertions(+), 84 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index e167ac329..1957696cd 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -1,6 +1,6 @@ -"""The matrix variate normal distribution, -with conditional and marginal identities -========================================================================================== +""" +Some properties of the matrix-variate normal distribution +--------------------------------------------------------- .. math:: \\DeclareMathOperator{\\Tr}{Tr} @@ -37,7 +37,7 @@ :math:`X \\sim \\mathcal{MN}(M,R,C)` then :math:`\\mathrm{vec}(X)\\sim\\mathcal{N}(\\mathrm{vec}(M), C \\otimes R)`, where :math:`\\mathrm{vec}(\\cdot)` is the vectorization operator and -:math:`otimes` is the Kronecker product. If we think of X as a matrix of TRs by +:math:`\\otimes` is the Kronecker product. 
If we think of X as a matrix of TRs by voxels in the fMRI setting, then this model assumes that each voxel has the same TR-by-TR covariance structure (represented by the matrix R), and each volume has the same spatial covariance (represented by the matrix C). @@ -50,7 +50,7 @@ .. math:: \\log p(X\\mid \\M,\\R, \\C) = -2\\log mn - m \\log|\\C| - n \\log|\\R| - - \\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right] + \\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right] Here :math:`X` and :math:`M` are both :math:`m\\times n` matrices, :math:`\\R` @@ -64,8 +64,7 @@ value and gradient, and simple verbose outputs. It also provides an interface for noise covariances (`CovBase`). Any class that follows the interface can be used as a noise covariance in any of the matrix normal models. The -package includes a variety of noise covariances to work with, as well as an -interface to use any of the kernels in the `GPflow` package. +package includes a variety of noise covariances to work with. Matrix normal marginals ------------------------- @@ -166,7 +165,7 @@ `MatnormModelBase.matnorm_logp_marginal_col`. Partitioned matrix normal conditionals --------------------------------------------------- +-------------------------------------- Here we extend the multivariate gaussian conditional identity to matrix normals. This is used for prediction in some models. Below, we @@ -223,17 +222,17 @@ following matrix variate gaussian: .. math:: - \\X_{ij} \\mid \\Y_{ik}\\sim \\mathcal{MN}(&\\A_{ij} + + \\X_{ij} \\mid \\Y_{ik}\\sim \\mathcal{MN}(\\A_{ij} + (\\Y_{ik}-\\B_{ik})\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i, - \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}) + \\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}) The conditional in the other direction can be written by working through the same algebra: .. 
math:: - \\Y_{ik} \\mid \\X_{ij}\\sim \\mathcal{MN}(&\\B_{ik} +(\\X_{ij}- + \\Y_{ik} \\mid \\X_{ij}\\sim \\mathcal{MN}(\\B_{ik} +(\\X_{ij}- \\A_{ij})\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i, - \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}) + \\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}) Finally, vertical rather than horizontal concatenation (yielding a partitioned row rather than column covariance) can be written by recognizing the behavior diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index 2022e2783..de6f41a25 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -66,7 +66,8 @@ def logdet(self): @abc.abstractmethod def solve(self, X): - """Given this covariance and some X, compute :math:`Sigma^{-1} * x` + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` """ pass @@ -103,7 +104,8 @@ def get_optimize_vars(self): return [] def solve(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` """ return X @@ -218,12 +220,13 @@ def _prec(self): def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to - fit this covariance + fit this covariance """ return [self.rho_unc, self.log_sigma] def solve(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` """ return tf.matmul(self._prec, X) @@ -261,7 +264,8 @@ def get_optimize_vars(self): return [self.log_var] def solve(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` Parameters ---------- @@ -306,7 +310,8 @@ def get_optimize_vars(self): return [self.logprec] def solve(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` Parameters ---------- @@ -386,8 +391,9 @@ def get_optimize_vars(self): return [self.L_flat] def solve(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` - using cholesky solve + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` (using cholesky solve) + Parameters ---------- X: tf.Tensor @@ -399,9 +405,9 @@ def solve(self, X): class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky): """Unconstrained noise covariance parameterized in terms of its - cholesky factor. - Regularized using the trick from Chung et al. 2015 such that as the - covariance approaches singularity, the likelihood goes to 0. + cholesky factor. Regularized using the trick from + Chung et al. 2015 such that as the covariance approaches + singularity, the likelihood goes to 0. 
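
Concretely, the ``solve``/``logdet`` contract these docstrings describe can be
exercised directly; a minimal sketch using the diagonal covariance (mirroring
the package's covariance tests, with arbitrary sizes and values)::

    import numpy as np
    import tensorflow as tf
    from brainiak.matnormal.covs import CovDiagonal

    rng = np.random.default_rng(0)
    m = 5
    diag = np.exp(rng.standard_normal(m))      # known positive diagonal
    cov = CovDiagonal(size=m, diag_var=diag)   # initialize at that diagonal
    X = tf.constant(rng.standard_normal((m, 3)))

    Sigma = np.diag(diag)
    np.testing.assert_allclose(cov.solve(X),
                               np.linalg.solve(Sigma, X.numpy()), rtol=1e-6)
    np.testing.assert_allclose(cov.logdet,
                               np.linalg.slogdet(Sigma)[1], rtol=1e-6)
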
References ---------- @@ -473,13 +479,14 @@ def logdet(self): def get_optimize_vars(self): """ Returns a list of tf variables that need to get optimized to fit - this covariance + this covariance """ return [self.Linv_flat] def solve(self, X): - """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` - using cholesky solve + """Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute :math:`\\Sigma^{-1}x` (using cholesky solve) + Parameters ---------- X: tf.Tensor @@ -593,9 +600,12 @@ def logdet(self): return 2.0 * logdetfinal def solve(self, X): - """ Given this Sigma and some X, compute Sigma^{-1} * x using - traingular solves with the cholesky factors. - Do 2 triangular solves - L L^T x = y as L z = y and L^T x = z + """ Given this covariance :math:`\\Sigma` and some input :math:`X`, + compute Sigma^{-1} * x using traingular solves with the cholesky + factors. + + Specifically, we solve :math:`L L^T x = y` by solving + :math:`L z = y` and :math:`L^T x = z`. Parameters ---------- diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 39e005a1b..57c667d56 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -27,15 +27,17 @@ def _condition(X): def solve_det_marginal(x, sigma, A, Q): """ Use matrix inversion lemma for the solve: + .. math:: - (\\Sigma + AQA')^{-1} X =\\ - (\\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} + - A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X + (\\Sigma + AQA')^{-1} X =\\ + (\\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} + + A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X Use matrix determinant lemma for determinant: + .. math:: - \\log|(\\Sigma + AQA')| = \\log|Q^{-1} + A' \\Sigma^{-1} A| - + \\log|Q| + \\log|\\Sigma| + \\log|(\\Sigma + AQA')| = \\log|Q^{-1} + A' \\Sigma^{-1} A| + + \\log|Q| + \\log|\\Sigma| Parameters ---------- @@ -108,15 +110,17 @@ def solve_det_marginal(x, sigma, A, Q): def solve_det_conditional(x, sigma, A, Q): """ Use matrix inversion lemma for the solve: + .. math:: - (\\Sigma - AQ^{-1}A')^{-1} X =\\ - (\\Sigma^{-1} + \\Sigma^{-1} A (Q - - A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X + (\\Sigma - AQ^{-1}A')^{-1} X =\\ + (\\Sigma^{-1} + \\Sigma^{-1} A (Q - + A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X Use matrix determinant lemma for determinant: + .. math:: - \\log|(\\Sigma - AQ^{-1}A')| = - \\log|Q - A' \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma| + \\log|(\\Sigma - AQ^{-1}A')| = + \\log|Q - A' \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma| Parameters ---------- @@ -236,7 +240,8 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): Y \\sim \\mathcal{MN}(0, R + AQA', C) This function efficiently computes the marginals by unpacking some - info in the covariance classes and then dispatching to solve_det_marginal. + info in the covariance classes and then dispatching to + `solve_det_marginal`. Parameters --------------- @@ -274,7 +279,8 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): Y \\sim \\mathcal{MN}(0, R, C + A'QA) This function efficiently computes the marginals by unpacking some - info in the covariance classes and then dispatching to solve_det_marginal. + info in the covariance classes and then dispatching to + `solve_det_marginal`. Parameters --------------- @@ -310,26 +316,28 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): Log likelihood for centered conditional matrix-variate normal density. Consider the following partitioned matrix-normal density: + .. 
math:: - \\begin{bmatrix} - \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ - \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] - \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} - \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ - \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} - \\end{bmatrix}\\right) + \\begin{bmatrix} + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] + \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} + \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} + \\end{bmatrix}\\right) Then we can write the conditional: - .. math :: - \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} - \\sim \\mathcal{M}\\ - \\mathcal{N}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1} - \\Sigma_{k j},\\ - \\Sigma_{i}\\right) + + .. math:: + \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} + \\sim \\mathcal{M}\\ + \\mathcal{N}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1} + \\Sigma_{k j},\\ + \\Sigma_{i}\\right) This function efficiently computes the conditionals by unpacking some info in the covariance classes and then dispatching to - solve_det_conditional. + `solve_det_conditional`. Parameters --------------- @@ -365,24 +373,26 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): Log likelihood for centered conditional matrix-variate normal density. Consider the following partitioned matrix-normal density: + .. math:: - \\begin{bmatrix} - \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ - \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] - \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} - \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ - \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} - \\end{bmatrix}\\right) + \\begin{bmatrix} + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] + \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} + \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} + \\end{bmatrix}\\right) Then we can write the conditional: - .. math :: - \\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\ - \\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\ - \\Sigma_{k}^{-1} \\Sigma_{k j}\\right) + + .. math:: + \\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\ + \\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\ + \\Sigma_{k}^{-1} \\Sigma_{k j}\\right) This function efficiently computes the conditionals by unpacking some info in the covariance classes and then dispatching to - solve_det_conditional. + `solve_det_conditional`. Parameters --------------- diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index c9c2d71c9..eb96c32ba 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -44,7 +44,7 @@ class MNRSA(BaseEstimator): .. 
math:: Y \\sim \\mathcal{MN}(0, \\Sigma_t + XLL^{\\top}X^{\\top}+ - X_0X_0^{\\top}, \\Sigma_s) + X_0X_0^{\\top}, \\Sigma_s)\\ U = LL^{\\top} Parameters diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index d0b43e1e4..8137ddb07 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -17,7 +17,7 @@ class MatnormalRegression(BaseEstimator): in the presence of both spatial and temporal covariance. ..math:: - Y \\sim \\mathcal{MN}(X\beta, time_cov, space_cov) + Y \\sim \\mathcal{MN}(X\beta, time_cov, space_cov) Parameters ---------- diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 366a8692f..cf367d956 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -6,40 +6,64 @@ def rmn(rowcov, colcov): - """ generate random draws from a zero-mean matrix-normal distribution """ + """ + Generate random draws from a zero-mean matrix-normal distribution. + + Parameters + ----------- + rowcov : np.ndarray + Row covariance (assumed to be positive definite) + colcov : np.ndarray + Column covariance (assumed to be positive definite) + """ + Z = norm.rvs(size=(rowcov.shape[0], colcov.shape[0])) return cholesky(rowcov).dot(Z).dot(cholesky(colcov)) def xx_t(x): - """ x * x' """ + """ + Outer product + :math:`xx^{\\top}` + + Parameters + ----------- + x : tf.Tensor + + """ return tf.matmul(x, x, transpose_b=True) def x_tx(x): - """ x' * x """ - return tf.matmul(x, x, transpose_a=True) + """Inner product + :math:`x^{\\top} x` + Parameters + ----------- + x : tf.Tensor -def quad_form(x, y): - """ x' * y * x """ - return tf.matmul(x, tf.matmul(y, x), transpose_a=True) + """ + return tf.matmul(x, x, transpose_a=True) def scaled_I(x, size): - """ x * I_{size} """ + """Scaled identity matrix + :math:`x * I_{size}` + + Parameters + ------------ + x: float or coercable to float + Scale to multiply the identity matrix by + size: int or otherwise coercable to a size + Dimension of the scaled identity matrix to return + """ return tf.linalg.tensor_diag(tf.ones([size], dtype=tf.float64) * x) -def quad_form_trp(x, y): - """ x * y * x' """ - return tf.matmul(x, tf.matmul(y, x, transpose_b=True)) - - def flatten_cholesky_unique(L): """ Flattens nonzero-elements Cholesky (triangular) factor - into a vector, and logs diagonal to make parameterizaation + into a vector, and logs diagonal to make parameterization unique. Inverse of unflatten_cholesky_unique. """ L[np.diag_indices_from(L)] = np.log(np.diag(L)) @@ -51,7 +75,7 @@ def unflatten_cholesky_unique(L_flat): """ Converts a vector of elements into a triangular matrix (Cholesky factor). Exponentiates diagonal to make - parameterizaation unique. Inverse of flatten_cholesky_unique. + parameterization unique. Inverse of flatten_cholesky_unique. 
""" L = tfp.math.fill_triangular(L_flat) # exp diag for unique parameterization From 8a41b9d9f865e2b21831314aacf3108fcd49d986 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 10:21:05 -0700 Subject: [PATCH 62/84] fix linter issues introduced by fixing docbuild issues --- brainiak/matnormal/__init__.py | 7 +++---- brainiak/matnormal/matnormal_likelihoods.py | 4 ++-- brainiak/matnormal/utils.py | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index 1957696cd..8b0b3c585 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -37,8 +37,8 @@ :math:`X \\sim \\mathcal{MN}(M,R,C)` then :math:`\\mathrm{vec}(X)\\sim\\mathcal{N}(\\mathrm{vec}(M), C \\otimes R)`, where :math:`\\mathrm{vec}(\\cdot)` is the vectorization operator and -:math:`\\otimes` is the Kronecker product. If we think of X as a matrix of TRs by -voxels in the fMRI setting, then this model assumes that each voxel has the +:math:`\\otimes` is the Kronecker product. If we think of X as a matrix of TRs +by voxels in the fMRI setting, then this model assumes that each voxel has the same TR-by-TR covariance structure (represented by the matrix R), and each volume has the same spatial covariance (represented by the matrix C). This assumption allows us to model both covariances separately. @@ -52,7 +52,6 @@ \\log p(X\\mid \\M,\\R, \\C) = -2\\log mn - m \\log|\\C| - n \\log|\\R| - \\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right] - Here :math:`X` and :math:`M` are both :math:`m\\times n` matrices, :math:`\\R` is :math:`m\\times m` and :math:`\\C` is :math:`n\\times n`. @@ -64,7 +63,7 @@ value and gradient, and simple verbose outputs. It also provides an interface for noise covariances (`CovBase`). Any class that follows the interface can be used as a noise covariance in any of the matrix normal models. The -package includes a variety of noise covariances to work with. +package includes a variety of noise covariances to work with. Matrix normal marginals ------------------------- diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 57c667d56..5c75d79e4 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -327,7 +327,7 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): \\end{bmatrix}\\right) Then we can write the conditional: - + .. math:: \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} \\sim \\mathcal{M}\\ @@ -373,7 +373,7 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): Log likelihood for centered conditional matrix-variate normal density. Consider the following partitioned matrix-normal density: - + .. 
math:: \\begin{bmatrix} \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index cf367d956..4df851022 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -49,7 +49,7 @@ def x_tx(x): def scaled_I(x, size): """Scaled identity matrix :math:`x * I_{size}` - + Parameters ------------ x: float or coercable to float From b77ddf3c3da7c73fc40fdb0f3ae8b6e5a0edf766 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 10:42:00 -0700 Subject: [PATCH 63/84] remove hard tf2.3 requirement (tensorflow_probability deps should resolve this automatically) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fef03ecfa..3adb64600 100644 --- a/setup.py +++ b/setup.py @@ -139,7 +139,7 @@ def finalize_options(self): 'psutil', 'nibabel', 'typing', - 'tensorflow>=2.3', # required for tensorflow_probability + 'tensorflow', 'tensorflow_probability', 'joblib', 'wheel', # See https://github.com/astropy/astropy-helpers/issues/501 From c0af0e6ea3007e88877d43f30a9f76c639a06ff7 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 11:26:14 -0700 Subject: [PATCH 64/84] add reproducible rng fixture, improve test coverage, fixup linear decoding --- brainiak/matnormal/regression.py | 17 +++++----- tests/conftest.py | 11 +++++++ tests/matnormal/test_cov.py | 28 ++++++++-------- tests/matnormal/test_matnormal_logp.py | 4 +-- .../test_matnormal_logp_conditional.py | 4 +-- .../matnormal/test_matnormal_logp_marginal.py | 4 +-- tests/matnormal/test_matnormal_regression.py | 32 +++++++++++++++---- tests/matnormal/test_matnormal_rsa.py | 2 +- tests/matnormal/test_matnormal_utils.py | 2 +- 9 files changed, 67 insertions(+), 37 deletions(-) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 8137ddb07..d852a5657 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -33,8 +33,7 @@ class MatnormalRegression(BaseEstimator): """ - def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", - optCtrl=None): + def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", optCtrl=None): self.optMethod = optimizer if optCtrl is None: @@ -84,12 +83,15 @@ def fit(self, X, y, naive_init=True): self.train_variables.extend(self.time_cov.get_optimize_vars()) self.train_variables.extend(self.space_cov.get_optimize_vars()) - def lossfn(theta): return -self.logp(X, y) + def lossfn(theta): + return -self.logp(X, y) + val_and_grad = make_val_and_grad(lossfn, self.train_variables) x0 = pack_trainable_vars(self.train_variables) - opt_results = minimize(fun=val_and_grad, x0=x0, - jac=True, method=self.optMethod, **self.optCtrl) + opt_results = minimize( + fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, **self.optCtrl + ) unpacked_theta = unpack_trainable_vars( opt_results.x, self.train_variables) @@ -133,10 +135,9 @@ def calibrate(self, Y): # Sigma_s^{-1} B' Sigma_s_btrp = self.space_cov.solve(tf.transpose(a=self.beta)) # Y Sigma_s^{-1} B' - Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).eval(session=self.sess) + Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).numpy() # (B Sigma_s^{-1} B')^{-1} - B_Sigma_Btrp = tf.matmul( - self.beta, Sigma_s_btrp).eval(session=self.sess) + B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp).numpy() X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T diff --git a/tests/conftest.py b/tests/conftest.py index 8b9888bad..de5ec6b56 100644 --- a/tests/conftest.py +++ 
b/tests/conftest.py @@ -1,5 +1,16 @@ from mpi4py import MPI +import pytest +import numpy +import random +import tensorflow def pytest_configure(config): config.option.xmlpath = "junit-{}.xml".format(MPI.COMM_WORLD.Get_rank()) + + +@pytest.fixture +def seeded_rng(): + random.seed(0) + numpy.random.seed(0) + tensorflow.random.set_seed(0) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 278939e83..33b8274e6 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -66,7 +66,7 @@ def logdet_sinv_np_mask(X, sigma, mask): eye = tf.eye(m, dtype=tf.float64) -def test_CovConstant(): +def test_CovConstant(seeded_rng): cov_np = wishart.rvs(df=m + 2, scale=np.eye(m)) cov = CovUnconstrainedCholesky(Sigma=cov_np) @@ -82,7 +82,7 @@ def test_CovConstant(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_CovIdentity(): +def test_CovIdentity(seeded_rng): cov = CovIdentity(size=m) @@ -94,7 +94,7 @@ def test_CovIdentity(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_CovIsotropic(): +def test_CovIsotropic(seeded_rng): cov = CovIsotropic(size=m) @@ -106,7 +106,7 @@ def test_CovIsotropic(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_CovDiagonal(): +def test_CovDiagonal(seeded_rng): cov = CovDiagonal(size=m) @@ -118,7 +118,7 @@ def test_CovDiagonal(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_CovDiagonal_initialized(): +def test_CovDiagonal_initialized(seeded_rng): cov_np = np.diag(np.exp(np.random.normal(size=m))) cov = CovDiagonal(size=m, diag_var=np.diag(cov_np)) @@ -130,7 +130,7 @@ def test_CovDiagonal_initialized(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_CovDiagonalGammaPrior(): +def test_CovDiagonalGammaPrior(seeded_rng): cov_np = np.diag(np.exp(np.random.normal(size=m))) cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5, @@ -147,7 +147,7 @@ def test_CovDiagonalGammaPrior(): assert_allclose(penalty_np, cov.logp, rtol=rtol) -def test_CovUnconstrainedCholesky(): +def test_CovUnconstrainedCholesky(seeded_rng): cov = CovUnconstrainedCholesky(size=m) @@ -159,7 +159,7 @@ def test_CovUnconstrainedCholesky(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_CovUnconstrainedCholeskyWishartReg(): +def test_CovUnconstrainedCholeskyWishartReg(seeded_rng): cov = CovUnconstrainedCholeskyWishartReg(size=m) @@ -175,7 +175,7 @@ def test_CovUnconstrainedCholeskyWishartReg(): assert_allclose(reg, cov.logp, rtol=rtol) -def test_CovUnconstrainedInvCholesky(): +def test_CovUnconstrainedInvCholesky(seeded_rng): init = invwishart.rvs(scale=np.eye(m), df=m + 2) cov = CovUnconstrainedInvCholesky(invSigma=init) @@ -190,7 +190,7 @@ def test_CovUnconstrainedInvCholesky(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_Cov2FactorKron(): +def test_Cov2FactorKron(seeded_rng): assert m % 2 == 0 dim1 = int(m / 2) dim2 = 2 @@ -211,7 +211,7 @@ def test_Cov2FactorKron(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_Cov3FactorKron(): +def test_Cov3FactorKron(seeded_rng): assert m % 4 == 0 dim1 = int(m / 4) dim2 = 2 @@ -232,7 +232,7 @@ def test_Cov3FactorKron(): assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) -def test_Cov3FactorMaskedKron(): +def test_Cov3FactorMaskedKron(seeded_rng): assert m % 4 == 0 dim1 = int(m / 4) dim2 = 2 @@ -267,7 +267,7 @@ def test_Cov3FactorMaskedKron(): ) -def test_CovAR1(): +def test_CovAR1(seeded_rng): cov = CovAR1(size=m) @@ -278,7 +278,7 @@ def test_CovAR1(): assert_allclose(sinvx_np, 
cov.solve(X_tf), rtol=rtol) -def test_CovAR1_scan_onsets(): +def test_CovAR1_scan_onsets(seeded_rng): cov = CovAR1(size=m, scan_onsets=[0, m // 2]) diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index 70a083753..eab43843c 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -19,7 +19,7 @@ rtol = 1e-7 -def test_against_scipy_mvn_row(): +def test_against_scipy_mvn_row(seeded_rng): rowcov = CovUnconstrainedCholesky(size=m) colcov = CovIdentity(size=n) @@ -34,7 +34,7 @@ def test_against_scipy_mvn_row(): assert_allclose(scipy_answer, tf_answer, rtol=rtol) -def test_against_scipy_mvn_col(): +def test_against_scipy_mvn_col(seeded_rng): rowcov = CovIdentity(size=m) colcov = CovUnconstrainedCholesky(size=n) diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 9f479e5b2..5c33c00db 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -22,7 +22,7 @@ rtol = 1e-7 -def test_against_scipy_mvn_row_conditional(): +def test_against_scipy_mvn_row_conditional(seeded_rng): # have to be careful for constructing everything as a submatrix of a big # PSD matrix, else no guarantee that anything's invertible. @@ -52,7 +52,7 @@ def test_against_scipy_mvn_row_conditional(): assert_allclose(scipy_answer, tf_answer, rtol=rtol) -def test_against_scipy_mvn_col_conditional(): +def test_against_scipy_mvn_col_conditional(seeded_rng): # have to be careful for constructing everything as a submatrix of a big # PSD matrix, else no guarantee that anything's invertible. diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index b5bcb6e87..411317b1e 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -23,7 +23,7 @@ rtol = 1e-7 -def test_against_scipy_mvn_row_marginal(): +def test_against_scipy_mvn_row_marginal(seeded_rng): rowcov = CovUnconstrainedCholesky(size=m) colcov = CovIdentity(size=n) @@ -46,7 +46,7 @@ def test_against_scipy_mvn_row_marginal(): assert_allclose(scipy_answer, tf_answer, rtol=rtol) -def test_against_scipy_mvn_col_marginal(): +def test_against_scipy_mvn_col_marginal(seeded_rng): rowcov = CovIdentity(size=m) colcov = CovUnconstrainedCholesky(size=n) diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 0af84f14a..26f2ed2b1 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -20,29 +20,32 @@ corrtol = 0.8 # at least this much correlation between true and est to pass -def test_matnorm_regression_unconstrained(): +def test_matnorm_regression_unconstrained(seeded_rng): # Y = XB + eps - # Y is m x n, B is n x p, eps is m x p + # Y is m x p, B is n x p, eps is m x p X = norm.rvs(size=(m, n)) B = norm.rvs(size=(n, p)) Y_hat = X.dot(B) rowcov_true = np.eye(m) colcov_true = wishart.rvs(p + 2, np.eye(p)) - y = Y_hat + rmn(rowcov_true, colcov_true) + Y = Y_hat + rmn(rowcov_true, colcov_true) row_cov = CovIdentity(size=m) col_cov = CovUnconstrainedCholesky(size=p) model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) - model.fit(X, y, naive_init=False) + model.fit(X, Y, naive_init=False) assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + pred_y = model.predict(X) + assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol + 
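
# The decoding rule exercised by calibrate() further below,
# X_hat = Y Sigma_s^{-1} B' (B Sigma_s^{-1} B')^{-1},
# can be sanity-checked in isolation with plain NumPy. A minimal sketch
# (arbitrary sizes, identity spatial covariance, noiseless Y so the decode
# recovers X exactly; not one of the package's tests):

import numpy as np

rng = np.random.default_rng(0)
m, n, p = 50, 4, 10                    # TRs, regressors, voxels
X = rng.standard_normal((m, n))
B = rng.standard_normal((n, p))
Y = X @ B                              # no noise for this check

Sigma_s = np.eye(p)                    # identity spatial covariance
Sinv_Btrp = np.linalg.solve(Sigma_s, B.T)            # Sigma_s^{-1} B'
X_hat = Y @ Sinv_Btrp @ np.linalg.inv(B @ Sinv_Btrp)
assert np.allclose(X_hat, X)
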
-def test_matnorm_regression_unconstrainedprec(): +def test_matnorm_regression_unconstrainedprec(seeded_rng): # Y = XB + eps # Y is m x n, B is n x p, eps is m x p @@ -63,8 +66,11 @@ def test_matnorm_regression_unconstrainedprec(): assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + pred_y = model.predict(X) + assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol -def test_matnorm_regression_optimizerChoice(): + +def test_matnorm_regression_optimizerChoice(seeded_rng): # Y = XB + eps # Y is m x n, B is n x p, eps is m x p @@ -86,8 +92,11 @@ def test_matnorm_regression_optimizerChoice(): assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + pred_y = model.predict(X) + assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol + -def test_matnorm_regression_scaledDiag(): +def test_matnorm_regression_scaledDiag(seeded_rng): # Y = XB + eps # Y is m x n, B is n x p, eps is m x p @@ -108,3 +117,12 @@ def test_matnorm_regression_scaledDiag(): model.fit(X, Y, naive_init=False) assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + + pred_y = model.predict(X) + assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol + + # we only do calibration test on the scaled diag + # model because to hit corrtol on unconstrainedCov + # we'd need a lot more data, which would make the test slow + X_hat = model.calibrate(Y) + assert pearsonr(X_hat.flatten(), X.flatten())[0] >= corrtol diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index ceb52b2a7..b1b1312d9 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -36,7 +36,7 @@ def gen_brsa_data_matnorm_model(U, n_T, n_V, space_cov, time_cov, n_nureg): return train, sizes -def test_brsa_rudimentary(): +def test_brsa_rudimentary(seeded_rng): """this test is super loose""" # this is Mingbo's synth example from the paper diff --git a/tests/matnormal/test_matnormal_utils.py b/tests/matnormal/test_matnormal_utils.py index 46fa2c691..113588255 100644 --- a/tests/matnormal/test_matnormal_utils.py +++ b/tests/matnormal/test_matnormal_utils.py @@ -2,7 +2,7 @@ import tensorflow as tf -def test_pack_unpack(): +def test_pack_unpack(seeded_rng): shapes = [[2, 3], [3], [3, 4, 2], [1, 5]] mats = [tf.random.stateless_normal( From 6250fc8407f16e98f77860dfaa32c1c73d04c29f Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 11:43:50 -0700 Subject: [PATCH 65/84] don't print debug on tests, improve cov --- tests/matnormal/test_cov.py | 4 -- tests/matnormal/test_matnormal_logp.py | 3 -- .../test_matnormal_logp_conditional.py | 3 -- .../matnormal/test_matnormal_logp_marginal.py | 3 -- tests/matnormal/test_matnormal_regression.py | 40 +++++++++++++++++-- tests/matnormal/test_matnormal_rsa.py | 11 +++-- 6 files changed, 45 insertions(+), 19 deletions(-) diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index 33b8274e6..d77404e17 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -16,10 +16,6 @@ CovKroneckerFactored, ) import pytest -import logging - - -logging.basicConfig(level=logging.DEBUG) # X is m x n, so A sould be m x p diff --git a/tests/matnormal/test_matnormal_logp.py b/tests/matnormal/test_matnormal_logp.py index eab43843c..55c474a42 100644 --- a/tests/matnormal/test_matnormal_logp.py +++ b/tests/matnormal/test_matnormal_logp.py @@ -6,9 +6,6 @@ from brainiak.matnormal.utils import rmn from brainiak.matnormal.matnormal_likelihoods import matnorm_logp from 
brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky -import logging - -logging.basicConfig(level=logging.DEBUG) # X is m x n, so A sould be m x p diff --git a/tests/matnormal/test_matnormal_logp_conditional.py b/tests/matnormal/test_matnormal_logp_conditional.py index 5c33c00db..e85f34a0b 100644 --- a/tests/matnormal/test_matnormal_logp_conditional.py +++ b/tests/matnormal/test_matnormal_logp_conditional.py @@ -9,9 +9,6 @@ matnorm_logp_conditional_row, ) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky -import logging - -logging.basicConfig(level=logging.DEBUG) # X is m x n, so A sould be m x p diff --git a/tests/matnormal/test_matnormal_logp_marginal.py b/tests/matnormal/test_matnormal_logp_marginal.py index 411317b1e..53ca2b67e 100644 --- a/tests/matnormal/test_matnormal_logp_marginal.py +++ b/tests/matnormal/test_matnormal_logp_marginal.py @@ -10,9 +10,6 @@ ) from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky -import logging - -logging.basicConfig(level=logging.DEBUG) # X is m x n, so A sould be m x p diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index 26f2ed2b1..da6f7fc71 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -1,3 +1,4 @@ +import pytest import numpy as np from scipy.stats import norm, wishart, pearsonr @@ -9,9 +10,6 @@ ) from brainiak.matnormal.regression import MatnormalRegression from brainiak.matnormal.utils import rmn -import logging - -logging.basicConfig(level=logging.DEBUG) m = 100 n = 4 @@ -44,6 +42,15 @@ def test_matnorm_regression_unconstrained(seeded_rng): pred_y = model.predict(X) assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) + + model.fit(X, Y, naive_init=True) + + assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol + + pred_y = model.predict(X) + assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol + def test_matnorm_regression_unconstrainedprec(seeded_rng): @@ -126,3 +133,30 @@ def test_matnorm_regression_scaledDiag(seeded_rng): # we'd need a lot more data, which would make the test slow X_hat = model.calibrate(Y) assert pearsonr(X_hat.flatten(), X.flatten())[0] >= corrtol + + +def test_matnorm_calibration_raises(seeded_rng): + + # Y = XB + eps + # Y is m x n, B is n x p, eps is m x p + X = norm.rvs(size=(2, 5)) + B = norm.rvs(size=(5, 3)) + Y_hat = X.dot(B) + + rowcov_true = np.eye(2) + colcov_true = np.diag(np.abs(norm.rvs(size=3))) + + Y = Y_hat + rmn(rowcov_true, colcov_true) + + row_cov = CovIdentity(size=2) + col_cov = CovDiagonal(size=3) + + model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov) + + model.fit(X, Y, naive_init=False) + + with pytest.raises(RuntimeError): + model.calibrate(Y) + + + diff --git a/tests/matnormal/test_matnormal_rsa.py b/tests/matnormal/test_matnormal_rsa.py index b1b1312d9..f897c3c9e 100644 --- a/tests/matnormal/test_matnormal_rsa.py +++ b/tests/matnormal/test_matnormal_rsa.py @@ -4,9 +4,6 @@ from scipy.stats import norm from brainiak.matnormal.utils import rmn import numpy as np -import logging - -logging.basicConfig(level=logging.DEBUG) def gen_U_nips2016_example(): @@ -69,3 +66,11 @@ def test_brsa_rudimentary(seeded_rng): RMSE = np.mean((model_matnorm.C_ - cov2corr(tr["U"])) ** 2) ** 0.5 assert RMSE < 0.1 + + model_matnorm = MNRSA(time_cov=timecov_model, space_cov=spacecov_model) + + model_matnorm.fit(tr["Y"], tr["X"], 
naive_init=True) + + RMSE = np.mean((model_matnorm.C_ - cov2corr(tr["U"])) ** 2) ** 0.5 + + assert RMSE < 0.1 From 3dfec382b0e056c5db4bdfc64f5037b384a989c9 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 12:01:17 -0700 Subject: [PATCH 66/84] improve coverage --- brainiak/matnormal/regression.py | 6 +++-- tests/matnormal/test_cov.py | 26 +++++++++++++++++++- tests/matnormal/test_matnormal_regression.py | 3 --- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index d852a5657..3067fa89d 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -33,7 +33,8 @@ class MatnormalRegression(BaseEstimator): """ - def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", optCtrl=None): + def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B", + optCtrl=None): self.optMethod = optimizer if optCtrl is None: @@ -90,7 +91,8 @@ def lossfn(theta): x0 = pack_trainable_vars(self.train_variables) opt_results = minimize( - fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, **self.optCtrl + fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, + **self.optCtrl ) unpacked_theta = unpack_trainable_vars( diff --git a/tests/matnormal/test_cov.py b/tests/matnormal/test_cov.py index d77404e17..fd1c11ca0 100644 --- a/tests/matnormal/test_cov.py +++ b/tests/matnormal/test_cov.py @@ -1,3 +1,4 @@ +import pytest import numpy as np from numpy.testing import assert_allclose from scipy.stats import norm, wishart, invgamma, invwishart @@ -15,7 +16,6 @@ CovUnconstrainedInvCholesky, CovKroneckerFactored, ) -import pytest # X is m x n, so A sould be m x p @@ -101,6 +101,10 @@ def test_CovIsotropic(seeded_rng): assert_allclose(sinv_np, cov.solve(eye), rtol=rtol) assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) + # test initialization + cov = CovIsotropic(var=0.123, size=3) + assert_allclose(np.exp(cov.log_var.numpy()), 0.123) + def test_CovDiagonal(seeded_rng): @@ -273,6 +277,11 @@ def test_CovAR1(seeded_rng): assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) + # test initialization + cov = CovAR1(rho=0.3, sigma=1.3, size=3) + assert_allclose(np.exp(cov.log_sigma.numpy()), 1.3) + assert_allclose((2 * tf.sigmoid(cov.rho_unc) - 1).numpy(), 0.3) + def test_CovAR1_scan_onsets(seeded_rng): @@ -284,3 +293,18 @@ def test_CovAR1_scan_onsets(seeded_rng): logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np) assert_allclose(logdet_np, cov.logdet, rtol=rtol) assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol) + + +def test_raises(seeded_rng): + + with pytest.raises(RuntimeError): + CovUnconstrainedCholesky(Sigma=np.eye(3), size=4) + + with pytest.raises(RuntimeError): + CovUnconstrainedCholesky() + + with pytest.raises(RuntimeError): + CovUnconstrainedInvCholesky(invSigma=np.eye(3), size=4) + + with pytest.raises(RuntimeError): + CovUnconstrainedInvCholesky() diff --git a/tests/matnormal/test_matnormal_regression.py b/tests/matnormal/test_matnormal_regression.py index da6f7fc71..556ee81e8 100644 --- a/tests/matnormal/test_matnormal_regression.py +++ b/tests/matnormal/test_matnormal_regression.py @@ -157,6 +157,3 @@ def test_matnorm_calibration_raises(seeded_rng): with pytest.raises(RuntimeError): model.calibrate(Y) - - - From 549bf2a4c5f489a59eab62907e9b00b02bc59bf0 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 12:23:54 -0700 Subject: [PATCH 67/84] docstring cleanups --- 
brainiak/matnormal/__init__.py | 26 ++++----- brainiak/matnormal/covs.py | 2 +- brainiak/matnormal/matnormal_likelihoods.py | 60 +++++++++++---------- brainiak/matnormal/mnrsa.py | 7 +-- brainiak/matnormal/regression.py | 2 +- brainiak/matnormal/utils.py | 6 +-- 6 files changed, 54 insertions(+), 49 deletions(-) diff --git a/brainiak/matnormal/__init__.py b/brainiak/matnormal/__init__.py index 8b0b3c585..458caf473 100644 --- a/brainiak/matnormal/__init__.py +++ b/brainiak/matnormal/__init__.py @@ -4,7 +4,7 @@ .. math:: \\DeclareMathOperator{\\Tr}{Tr} - \\newcommand{\\trp}{{^\\top}} % transpose + \\newcommand{\\trp}{^{T}} % transpose \\newcommand{\\trace}{\\text{Trace}} % trace \\newcommand{\\inv}{^{-1}} \\newcommand{\\mb}{\\mathbf{b}} @@ -74,24 +74,24 @@ Uppercase subscripts for covariances help keep track where they come from. .. math:: - \\mathbf{X}_{ij} \\sim \\mathcal{MN}(\\mathbf{A}_{ij}, + \\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{\\mathbf{X}i},\\Sigma_{\\mathbf{X}j})\\\\ - \\mathbf{Y}_{jk} \\sim \\mathcal{MN}(\\mathbf{B}_{jk}, + \\mathbf{Y}_{jk} &\\sim \\mathcal{MN}(\\mathbf{B}_{jk}, \\Sigma_{\\mathbf{Y}j},\\Sigma_{\\mathbf{Y}k})\\\\ - \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim + \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} &\\sim \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{Y}_{jk} + \\mathbf{C}_{ik}, \\Sigma_{\\mathbf{Z}_i}, \\Sigma_{\\mathbf{Z}_k})\\\\ We vectorize, and convert to a form we recognize as -$y \\sim \\mathcal{N}(Mx+b, \\Sigma)$. +:math:`y \\sim \\mathcal{N}(Mx+b, \\Sigma)`. .. math:: - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} &\\sim \\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{Y}_{jk}+\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\ \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} - \\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{Y}_{jk}) + &\\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{Y}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i}) @@ -126,15 +126,15 @@ and transform it back into a matrix normal: .. math:: - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} &\\sim \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{_k}\\otimes \\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp)\\\\ - \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim + \\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} &\\sim \\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}), \\Sigma_{k}\\otimes(\\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp))\\\\ - \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij} \\sim + \\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\X\\mathbf{B}_{jk} + \\mathbf{C}_{ik}, \\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp,\\Sigma_{k}) @@ -144,17 +144,17 @@ \\mathcal{MN}(M\\trp, V, U)`: .. 
math:: - \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} \\sim + \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} &\\sim \\mathcal{MN}(\\mathbf{Y}_{jk}\\trp\\mathbf{X}_{ij}\\trp + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k},\\Sigma_{\\mathbf{Z}_i})\\\\ \\mbox{let } \\Sigma_i := \\Sigma_{\\mathbf{Z}_i}=\\Sigma_{\\mathbf{X}_i} \\\\ \\cdots\\\\ - \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{Y}_{jk} \\sim + \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{Y}_{jk} &\\sim \\mathcal{MN}(\\mathbf{A}_{jk}\\trp\\mathbf{X}_{ij}\\trp + \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k} + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y,\\Sigma_{\\mathbf{Z}_i})\\\\ - \\mathbf{Z}_{ik}\\mid\\mathbf{Y}_{jk} \\sim + \\mathbf{Z}_{ik}\\mid\\mathbf{Y}_{jk} &\\sim \\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{A}_{jk}+ \\mathbf{C}_{ik},\\Sigma_{\\mathbf{Z}_i},\\Sigma_{\\mathbf{Z}_k} + \\Y\\trp\\Sigma_{\\mathbf{Y}_j}\\Y) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index de6f41a25..84b386ce0 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -601,7 +601,7 @@ def logdet(self): def solve(self, X): """ Given this covariance :math:`\\Sigma` and some input :math:`X`, - compute Sigma^{-1} * x using traingular solves with the cholesky + compute :math:`\\Sigma^{-1}x` using traingular solves with the cholesky factors. Specifically, we solve :math:`L L^T x = y` by solving diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 5c75d79e4..aa58c6fa9 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -29,14 +29,14 @@ def solve_det_marginal(x, sigma, A, Q): Use matrix inversion lemma for the solve: .. math:: - (\\Sigma + AQA')^{-1} X =\\ + (\\Sigma + AQA^T)^{-1} X =\\ (\\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} + - A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X + A^T \\Sigma^{-1} A)^{-1} A^T \\Sigma^{-1}) X Use matrix determinant lemma for determinant: .. math:: - \\log|(\\Sigma + AQA')| = \\log|Q^{-1} + A' \\Sigma^{-1} A| + \\log|(\\Sigma + AQA^T)| = \\log|Q^{-1} + A^T \\Sigma^{-1} A| + \\log|Q| + \\log|\\Sigma| Parameters @@ -70,7 +70,7 @@ def solve_det_marginal(x, sigma, A, Q): "sigma min={tf.reduce_min(input_tensor=A)}", ) - # cholesky of (Qinv + A' Sigma^{-1} A), which looks sort of like + # cholesky of (Qinv + A^T Sigma^{-1} A), which looks sort of like # a schur complement but isn't, so we call it the "lemma factor" # since we use it in woodbury and matrix determinant lemmas lemma_factor = tlinalg.cholesky( @@ -95,9 +95,9 @@ def solve_det_marginal(x, sigma, A, Q): f"lemma factor logdet={lemma_logdet}", ) - # A' Sigma^{-1} + # A^T Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) - # (Qinv + A' Sigma^{-1} A)^{-1} A' Sigma^{-1} + # (Qinv + A^T Sigma^{-1} A)^{-1} A^T Sigma^{-1} prod_term = tlinalg.cholesky_solve(lemma_factor, Atrp_Sinv) solve = tf.matmul( @@ -112,15 +112,15 @@ def solve_det_conditional(x, sigma, A, Q): Use matrix inversion lemma for the solve: .. math:: - (\\Sigma - AQ^{-1}A')^{-1} X =\\ + (\\Sigma - AQ^{-1}A^T)^{-1} X =\\ (\\Sigma^{-1} + \\Sigma^{-1} A (Q - - A' \\Sigma^{-1} A)^{-1} A' \\Sigma^{-1}) X + A^T \\Sigma^{-1} A)^{-1} A^T \\Sigma^{-1}) X Use matrix determinant lemma for determinant: .. 
math:: - \\log|(\\Sigma - AQ^{-1}A')| = - \\log|Q - A' \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma| + \\log|(\\Sigma - AQ^{-1}A^T)| = + \\log|Q - A^T \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma| Parameters ---------- @@ -136,7 +136,7 @@ def solve_det_conditional(x, sigma, A, Q): """ - # (Q - A' Sigma^{-1} A) + # (Q - A^T Sigma^{-1} A) lemma_factor = tlinalg.cholesky( Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True) ) @@ -149,9 +149,9 @@ def solve_det_conditional(x, sigma, A, Q): tlinalg.diag_part(lemma_factor))) ) - # A' Sigma^{-1} + # A^T Sigma^{-1} Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True) - # (Q - A' Sigma^{-1} A)^{-1} A' Sigma^{-1} + # (Q - A^T Sigma^{-1} A)^{-1} A^T Sigma^{-1} prod_term = tlinalg.cholesky_solve(lemma_factor, Atrp_Sinv) solve = tf.matmul( @@ -235,9 +235,11 @@ def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov): Log likelihood for marginal centered matrix-variate normal density. .. math:: - X \\sim \\mathcal{MN}(0, Q, C)\\ - Y \\mid \\X \\sim \\mathcal{MN}(AX, R, C),\\ - Y \\sim \\mathcal{MN}(0, R + AQA', C) + X &\\sim \\mathcal{MN}(0, Q, C)\\ + + Y \\mid \\X &\\sim \\mathcal{MN}(AX, R, C),\\ + + Y &\\sim \\mathcal{MN}(0, R + AQA^T, C) This function efficiently computes the marginals by unpacking some info in the covariance classes and then dispatching to @@ -274,9 +276,11 @@ def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov): Log likelihood for centered marginal matrix-variate normal density. .. math:: - X \\sim \\mathcal{MN}(0, R, Q)\\ - Y \\mid \\X \\sim \\mathcal{MN}(XA, R, C),\\ - Y \\sim \\mathcal{MN}(0, R, C + A'QA) + X &\\sim \\mathcal{MN}(0, R, Q)\\ + + Y \\mid \\X &\\sim \\mathcal{MN}(XA, R, C),\\ + + Y &\\sim \\mathcal{MN}(0, R, C + A^TQA) This function efficiently computes the marginals by unpacking some info in the covariance classes and then dispatching to @@ -319,17 +323,17 @@ def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov): .. math:: \\begin{bmatrix} - \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ - \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] - \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} - \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\\\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right]\\end{bmatrix} + \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} \\Sigma_{j} \\otimes + \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\\\ \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} \\end{bmatrix}\\right) Then we can write the conditional: .. math:: - \\mathbf{X}^{\\top} j i \\mid \\mathbf{Y}_{k i}^{\\top} + \\mathbf{X}^T j i \\mid \\mathbf{Y}_{k i}^T \\sim \\mathcal{M}\\ \\mathcal{N}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1} \\Sigma_{k j},\\ @@ -376,10 +380,10 @@ def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov): .. 
math:: \\begin{bmatrix} - \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\ - \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right] - \\end{bmatrix} \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} - \\Sigma_{j} \\otimes \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\ + \\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\\\ + \\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right]\\end{bmatrix} + \\sim \\mathcal{N}\\left(0,\\begin{bmatrix} \\Sigma_{j} \\otimes + \\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\\\ \\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i} \\end{bmatrix}\\right) diff --git a/brainiak/matnormal/mnrsa.py b/brainiak/matnormal/mnrsa.py index eb96c32ba..0b175bf45 100644 --- a/brainiak/matnormal/mnrsa.py +++ b/brainiak/matnormal/mnrsa.py @@ -43,9 +43,10 @@ class MNRSA(BaseEstimator): support, you should use MNRSA. .. math:: - Y \\sim \\mathcal{MN}(0, \\Sigma_t + XLL^{\\top}X^{\\top}+ - X_0X_0^{\\top}, \\Sigma_s)\\ - U = LL^{\\top} + Y &\\sim \\mathcal{MN}(0, \\Sigma_t + XLL^TX^T+ + X_0X_0^T, \\Sigma_s)\\ + + U &= LL^T Parameters ---------- diff --git a/brainiak/matnormal/regression.py b/brainiak/matnormal/regression.py index 3067fa89d..816f22a7a 100644 --- a/brainiak/matnormal/regression.py +++ b/brainiak/matnormal/regression.py @@ -120,7 +120,7 @@ def calibrate(self, Y): trained mapping. This method just does naive MLE: .. math:: - X = Y \\Sigma_s^{-1}B'(B \\Sigma_s^{-1} B')^{-1} + X = Y \\Sigma_s^{-1}B^T(B \\Sigma_s^{-1} B^T)^{-1} Parameters ---------- diff --git a/brainiak/matnormal/utils.py b/brainiak/matnormal/utils.py index 4df851022..52d730bec 100644 --- a/brainiak/matnormal/utils.py +++ b/brainiak/matnormal/utils.py @@ -24,7 +24,7 @@ def rmn(rowcov, colcov): def xx_t(x): """ Outer product - :math:`xx^{\\top}` + :math:`xx^T` Parameters ----------- @@ -36,7 +36,7 @@ def xx_t(x): def x_tx(x): """Inner product - :math:`x^{\\top} x` + :math:`x^T x` Parameters ----------- @@ -48,7 +48,7 @@ def x_tx(x): def scaled_I(x, size): """Scaled identity matrix - :math:`x * I_{size}` + :math:`x I_{size}` Parameters ------------ From d0da34cb102ef8af584635095f088ff16d1f1eb5 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Sat, 22 Aug 2020 12:24:26 -0700 Subject: [PATCH 68/84] notation consistency fix for the example too --- examples/matnormal/MN-RSA.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/matnormal/MN-RSA.ipynb b/examples/matnormal/MN-RSA.ipynb index fc2849ae9..abb6178c4 100644 --- a/examples/matnormal/MN-RSA.ipynb +++ b/examples/matnormal/MN-RSA.ipynb @@ -8,7 +8,7 @@ "\n", "$$\n", "\\DeclareMathOperator{\\Tr}{Tr}\n", - "\\newcommand{\\trp}{{^\\top}} % transpose\n", + "\\newcommand{\\trp}{{^\\T}} % transpose\n", "\\newcommand{\\trace}{\\text{Trace}} % trace\n", "\\newcommand{\\inv}{^{-1}}\n", "\\newcommand{\\mb}{\\mathbf{b}}\n", From 0022cc75f16415ea76db8d9f18dfa22a45abb425 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 24 Aug 2020 21:08:01 -0700 Subject: [PATCH 69/84] addressing @mihaic's comments --- brainiak/matnormal/matnormal_likelihoods.py | 2 +- brainiak/utils/utils.py | 1 - setup.py | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index aa58c6fa9..55d3f3dff 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -67,7 +67,7 @@ def solve_det_marginal(x, sigma, A, Q): logging.log( logging.DEBUG, f"sigma 
max={tf.reduce_max(input_tensor=A)}," + - "sigma min={tf.reduce_min(input_tensor=A)}", + f"sigma min={tf.reduce_min(input_tensor=A)}", ) # cholesky of (Qinv + A^T Sigma^{-1} A), which looks sort of like diff --git a/brainiak/utils/utils.py b/brainiak/utils/utils.py index 8dd7bf202..df103c5bc 100644 --- a/brainiak/utils/utils.py +++ b/brainiak/utils/utils.py @@ -18,7 +18,6 @@ import psutil from .fmrisim import generate_stimfunction, _double_gamma_hrf, convolve_hrf from scipy.fftpack import fft, ifft - import logging logger = logging.getLogger(__name__) diff --git a/setup.py b/setup.py index 3adb64600..b632add27 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,6 @@ def finalize_options(self): 'pybind11>=1.7', 'psutil', 'nibabel', - 'typing', 'tensorflow', 'tensorflow_probability', 'joblib', From 32aabefa2d3a20211345f8c14354bdcbd950e8ed Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 27 Aug 2020 20:18:23 -0700 Subject: [PATCH 70/84] tf1->2 conversion script --- brainiak/matnormal/covs.py | 2 +- brainiak/matnormal/dpmnsrm.py | 96 ++++++++++++++++----------------- brainiak/matnormal/srm_margs.py | 42 +++++++-------- 3 files changed, 70 insertions(+), 70 deletions(-) diff --git a/brainiak/matnormal/covs.py b/brainiak/matnormal/covs.py index b61c09126..029703145 100644 --- a/brainiak/matnormal/covs.py +++ b/brainiak/matnormal/covs.py @@ -633,7 +633,7 @@ def __init__(self, base_cov, scale=1.0): def logdet(self): """ log|Sigma| """ - return self._baseCov.logdet + tf.log(self._scale) * self._baseCov.size + return self._baseCov.logdet + tf.math.log(self._scale) * self._baseCov.size def solve(self, X): """Given this Sigma and some X, compute :math:`Sigma^{-1} * x` diff --git a/brainiak/matnormal/dpmnsrm.py b/brainiak/matnormal/dpmnsrm.py index 7bd5acfe3..171212012 100644 --- a/brainiak/matnormal/dpmnsrm.py +++ b/brainiak/matnormal/dpmnsrm.py @@ -47,10 +47,10 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.optCtrl, self.optMethod = optCtrl, optMethod # create a tf session we reuse for this object - self.sess = tf.Session() + self.sess = tf.compat.v1.Session() def _eye(self, x): - return tf.diag(tf.ones((x), dtype=tf.float64)) + return tf.linalg.tensor_diag(tf.ones((x), dtype=tf.float64)) def _make_logp_op(self): """ MatnormSRM Log-likelihood""" @@ -58,16 +58,16 @@ def _make_logp_op(self): scale=1/self.rhoprec[j]) for j in range(self.n)] if self.marg_cov_class is CovIdentity: return tf.reduce_sum( - [matnorm_logp_marginal_col(self.X[j], + input_tensor=[matnorm_logp_marginal_col(self.X[j], row_cov=subj_space_covs[j], - col_cov=self.time_cov,ÃŽ + col_cov=self.time_cov, marg=self.S, marg_cov=CovIdentity(size=self.k)) for j in range(self.n)], name="lik_logp") elif self.marg_cov_class is CovUnconstrainedCholesky: return tf.reduce_sum( - [matnorm_logp_marginal_col(self.X[j], + input_tensor=[matnorm_logp_marginal_col(self.X[j], row_cov=subj_space_covs[j], col_cov=self.time_cov, marg=tf.matmul( @@ -78,7 +78,7 @@ def _make_logp_op(self): logger.warn("ECME with cov that is not identity or unconstrained may\ yield numerical instabilities! Use ECM for now.") return tf.reduce_sum( - [matnorm_logp_marginal_col(self.X[j], + input_tensor=[matnorm_logp_marginal_col(self.X[j], row_cov=subj_space_covs[j], col_cov=self.time_cov, marg=self.S, @@ -92,40 +92,40 @@ def _make_Q_op(self): [self.n, 1, 1])) # covs don't support batch ops (yet!) 
(TODO): - x_quad_form = -tf.trace(tf.reduce_sum( - [tf.matmul(self.time_cov.Sigma_inv_x( - tf.transpose(mean[j])), + x_quad_form = -tf.linalg.trace(tf.reduce_sum( + input_tensor=[tf.matmul(self.time_cov.Sigma_inv_x( + tf.transpose(a=mean[j])), self.space_cov.Sigma_inv_x(mean[j])) * self.rhoprec[j] - for j in range(self.n)], 0)) + for j in range(self.n)], axis=0)) - w_quad_form = -tf.trace(tf.reduce_sum( - [tf.matmul(self.marg_cov.Sigma_inv_x( - tf.transpose(self.w_prime[j])), + w_quad_form = -tf.linalg.trace(tf.reduce_sum( + input_tensor=[tf.matmul(self.marg_cov.Sigma_inv_x( + tf.transpose(a=self.w_prime[j])), self.space_cov.Sigma_inv_x(self.w_prime[j])) * self.rhoprec[j] - for j in range(self.n)], 0)) + for j in range(self.n)], axis=0)) if self.s_constraint == "gaussian": s_quad_form = - \ - tf.trace(tf.matmul(self.time_cov.Sigma_inv_x( - tf.transpose(self.S)), self.S)) + tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x( + tf.transpose(a=self.S)), self.S)) det_terms = -(self.v*self.n+self.k) * self.time_cov.logdet -\ (self.k+self.t)*self.n*self.space_cov.logdet +\ - (self.k+self.t)*self.v*tf.reduce_sum(tf.log(self.rhoprec)) -\ + (self.k+self.t)*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ (self.n*self.v)*self.marg_cov.logdet else: s_quad_form = 0 det_terms = -(self.v*self.n)*self.time_cov.logdet -\ (self.k+self.t)*self.n*self.space_cov.logdet +\ - (self.k+self.t)*self.v*tf.reduce_sum(tf.log(self.rhoprec)) -\ + (self.k+self.t)*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ (self.n*self.v)*self.marg_cov.logdet - trace_prod = -tf.reduce_sum(self.rhoprec / self.rhoprec_prime) *\ - tf.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ - (tf.trace(tf.matmul(self.wcov_prime, self.marg_cov.Sigma_inv + + trace_prod = -tf.reduce_sum(input_tensor=self.rhoprec / self.rhoprec_prime) *\ + tf.linalg.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ + (tf.linalg.trace(tf.matmul(self.wcov_prime, self.marg_cov.Sigma_inv + tf.matmul(self.S, self.time_cov.Sigma_inv_x( - tf.transpose(self.S)))))) + tf.transpose(a=self.S)))))) return 0.5 * (det_terms + x_quad_form + @@ -137,14 +137,14 @@ def make_estep_ops(self): rhoprec_prime = self.rhoprec vcov_prime = self.space_cov.Sigma - wchol = tf.cholesky(self.marg_cov.Sigma_inv + + wchol = tf.linalg.cholesky(self.marg_cov.Sigma_inv + tf.matmul(self.S, self.time_cov.Sigma_inv_x( - tf.transpose(self.S)))) + tf.transpose(a=self.S)))) - wcov_prime = tf.cholesky_solve(wchol, self._eye(self.k)) + wcov_prime = tf.linalg.cholesky_solve(wchol, self._eye(self.k)) stacked_rhs = tf.tile(tf.expand_dims(self.time_cov.Sigma_inv_x( - tf.transpose(tf.cholesky_solve(wchol, self.S))), 0), + tf.transpose(a=tf.linalg.cholesky_solve(wchol, self.S))), 0), [self.n, 1, 1]) w_prime = tf.matmul(self.X-self.b, stacked_rhs) @@ -153,28 +153,28 @@ def make_estep_ops(self): def make_mstep_b_op(self): return tf.expand_dims(tf.reduce_sum( - [self.time_cov.Sigma_inv_x(tf.transpose(self.X[j] - + input_tensor=[self.time_cov.Sigma_inv_x(tf.transpose(a=self.X[j] - tf.matmul(self.w_prime[j], self.S))) - for j in range(self.n)], 1) / - tf.reduce_sum(self.time_cov.Sigma_inv), -1) + for j in range(self.n)], axis=1) / + tf.reduce_sum(input_tensor=self.time_cov.Sigma_inv), -1) def make_mstep_S_op(self): wtw = tf.reduce_sum( - [tf.matmul(self.w_prime[j], + input_tensor=[tf.matmul(self.w_prime[j], self.space_cov.Sigma_inv_x(self.w_prime[j]), transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], 0) + self.rhoprec[j] for j in range(self.n)], axis=0) wtx = 
tf.reduce_sum( - [tf.matmul(self.w_prime[j], + input_tensor=[tf.matmul(self.w_prime[j], self.space_cov.Sigma_inv_x(self.X[j]-self.b[j]), transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], 0) + self.rhoprec[j] for j in range(self.n)], axis=0) - return tf.matrix_solve(wtw + - tf.reduce_sum(self.rhoprec / + return tf.linalg.solve(wtw + + tf.reduce_sum(input_tensor=self.rhoprec / self.rhoprec_prime) * - tf.trace(self.space_cov.Sigma_inv_x( + tf.linalg.trace(self.space_cov.Sigma_inv_x( self.vcov_prime)) * self.wcov_prime + self._eye(self.k), wtx) @@ -186,23 +186,23 @@ def make_mstep_rhoprec_op(self): [self.n, 1, 1])) mean_trace = tf.stack( - [tf.trace(tf.matmul(self.time_cov.Sigma_inv_x( - tf.transpose(mean[j])), + [tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x( + tf.transpose(a=mean[j])), self.space_cov.Sigma_inv_x(mean[j]))) for j in range(self.n)]) w_trace = tf.stack( - [tf.trace(tf.matmul(self.marg_cov.Sigma_inv_x( - tf.transpose(self.w_prime[j])), + [tf.linalg.trace(tf.matmul(self.marg_cov.Sigma_inv_x( + tf.transpose(a=self.w_prime[j])), self.space_cov.Sigma_inv_x(self.w_prime[j]))) for j in range(self.n)]) shared_term = (1/self.rhoprec_prime) *\ - tf.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ - tf.trace(tf.matmul(self.wcov_prime, + tf.linalg.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ + tf.linalg.trace(tf.matmul(self.wcov_prime, self.marg_cov.Sigma_inv + tf.matmul(self.S, self.time_cov.Sigma_inv_x( - tf.transpose(self.S))))) + tf.transpose(a=self.S))))) rho_hat_unscaled = mean_trace + w_trace + shared_term return (self.v*(self.k+self.t)) / rho_hat_unscaled @@ -237,7 +237,7 @@ def _init_vars(self, X): self.S_trp = tf.Variable(np.average([s[2][:self.k, :] for s in xsvd], 0).T, dtype=tf.float64, name="S_transpose") - self.S = tf.transpose(self.S_trp) + self.S = tf.transpose(a=self.S_trp) def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): """ @@ -299,7 +299,7 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): # we're not pymanopting problem.backend._session = self.sess - self.sess.run(tf.global_variables_initializer()) + self.sess.run(tf.compat.v1.global_variables_initializer()) converged = False for i in range(max_iter): @@ -315,7 +315,7 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): # for convergence, we check w, rho, and sigma_v (since we # use them for reconstruction/projection) - w_norm = tf.norm(w_prime_new - self.w_prime).eval( + w_norm = tf.norm(tensor=w_prime_new - self.w_prime).eval( session=self.sess) / (self.n*self.v*self.k) # update (since we reuse wcov_prime in computing w_prime) self.w_prime.load(w_prime_new, session=self.sess) @@ -331,7 +331,7 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): self.b.load(b_op.eval(session=self.sess), session=self.sess) rhoprec_new = rhoprec_op.eval(session=self.sess) - rhoprec_norm = tf.norm(rhoprec_new - self.rhoprec).eval( + rhoprec_norm = tf.norm(tensor=rhoprec_new - self.rhoprec).eval( session=self.sess) / self.n self.rhoprec.load(rhoprec_new, session=self.sess) @@ -353,7 +353,7 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): if self.space_noise_cov_class is not CovIdentity: sigma_v_opt.minimize(session=self.sess) - sigv_norm = tf.norm(old_sigma_v - self.space_cov.Sigma).eval( + sigv_norm = tf.norm(tensor=old_sigma_v - self.space_cov.Sigma).eval( session=self.sess) / (self.v**2) if self.time_noise_cov_class is not CovIdentity: diff --git a/brainiak/matnormal/srm_margs.py b/brainiak/matnormal/srm_margs.py index e1f5d5098..83305a404 
100644 --- a/brainiak/matnormal/srm_margs.py +++ b/brainiak/matnormal/srm_margs.py @@ -31,10 +31,10 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.optCtrl, self.optMethod = optCtrl, optMethod # create a tf session we reuse for this object - self.sess = tf.Session() + self.sess = tf.compat.v1.Session() def _eye(self, x): - return tf.diag(tf.ones((x), dtype=tf.float64)) + return tf.linalg.tensor_diag(tf.ones((x), dtype=tf.float64)) def _make_Q_op(self): mean = self.X - self.b - tf.matmul(self.w, tf.tile(tf.expand_dims(self.s_prime, 0), [self.n, 1, 1]) ) @@ -42,24 +42,24 @@ def _make_Q_op(self): det_terms = -(self.v*self.n + self.k)*self.time_cov.logdet -\ (self.t*self.n)*self.space_cov.logdet -\ self.t*self.marg_cov.logdet +\ - (self.t*self.v)*tf.reduce_sum(tf.log(self.rhoprec)) + (self.t*self.v)*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) # used twice below - trace_t_t = tf.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) + trace_t_t = tf.linalg.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) # covs don't support batch ops (yet!) (TODO): - x_quad_form = -tf.trace(tf.reduce_sum([tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(mean[j])), + x_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(a=mean[j])), self.space_cov.Sigma_inv_x(mean[j]))*self.rhoprec[j] - for j in range(self.n)], 0)) + for j in range(self.n)], axis=0)) - w_quad_form = -tf.trace(tf.reduce_sum([tf.matmul(tf.matmul(self.scov_prime, tf.transpose(self.w[j])), + w_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.w[j])), self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] - for j in range(self.n)], 0)) * trace_t_t + for j in range(self.n)], axis=0)) * trace_t_t - s_quad_form = -tf.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(self.s_prime)), + s_quad_form = -tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(a=self.s_prime)), self.marg_cov.Sigma_inv_x(self.s_prime))) - sig_trace_prod = -trace_t_t * tf.trace(self.marg_cov.Sigma_inv_x(self.scov_prime)) + sig_trace_prod = -trace_t_t * tf.linalg.trace(self.marg_cov.Sigma_inv_x(self.scov_prime)) return 0.5 * (det_terms + x_quad_form + s_quad_form + w_quad_form + sig_trace_prod)#, det_terms, x_quad_form, s_quad_form, w_quad_form, sig_trace_prod @@ -68,33 +68,33 @@ def make_estep_ops(self): tcov_prime = self.time_cov.Sigma Xmb = self.X - self.b - sprec_chol = tf.cholesky(self.marg_cov.Sigma_inv + tf.reduce_sum([tf.matmul(tf.transpose(self.w[j]), self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] for j in range(self.n)], 0)) + sprec_chol = tf.linalg.cholesky(self.marg_cov.Sigma_inv + tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose(a=self.w[j]), self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] for j in range(self.n)], axis=0)) - wsig_x = tf.reduce_sum([tf.matmul(tf.transpose(self.w[j]), self.space_cov.Sigma_inv_x(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], 0) + wsig_x = tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose(a=self.w[j]), self.space_cov.Sigma_inv_x(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], axis=0) - scov_prime = tf.cholesky_solve(sprec_chol, self._eye(self.k)) + scov_prime = tf.linalg.cholesky_solve(sprec_chol, self._eye(self.k)) - s_prime = tf.cholesky_solve(sprec_chol, wsig_x) + s_prime = tf.linalg.cholesky_solve(sprec_chol, wsig_x) return s_prime, scov_prime, tcov_prime def make_mstep_b_op(self): - return 
tf.expand_dims(tf.reduce_sum([self.time_cov.Sigma_inv_x(tf.transpose(self.X[j] - + return tf.expand_dims(tf.reduce_sum(input_tensor=[self.time_cov.Sigma_inv_x(tf.transpose(a=self.X[j] - tf.matmul(self.w[j],self.s_prime))) - for j in range(self.n)], 1) / - tf.reduce_sum(self.time_cov.Sigma_inv), -1) + for j in range(self.n)], axis=1) / + tf.reduce_sum(input_tensor=self.time_cov.Sigma_inv), -1) def make_mstep_rhoprec_op(self): mean = self.X - self.b - tf.matmul(self.w, tf.tile(tf.expand_dims(self.s_prime,0), [self.n, 1, 1]) ) - mean_trace = tf.stack([tf.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(mean[j])), + mean_trace = tf.stack([tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(a=mean[j])), self.space_cov.Sigma_inv_x(mean[j]))) for j in range(self.n)]) - trace_t_t = tf.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) + trace_t_t = tf.linalg.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) - w_trace = trace_t_t * tf.stack([tf.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(self.w[j])), + w_trace = trace_t_t * tf.stack([tf.linalg.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.w[j])), self.space_cov.Sigma_inv_x(self.w[j]))) for j in range(self.n)]) rho_hat_unscaled = mean_trace + w_trace @@ -174,7 +174,7 @@ def fit(self, X, n_iter=10, y=None, w_cov=None): for i in range(self.n): w_problems[i].backend._session = self.sess - self.sess.run(tf.global_variables_initializer()) + self.sess.run(tf.compat.v1.global_variables_initializer()) for em_iter in range(n_iter): q_start = q_op.eval(session=self.sess) From 3bbd540cc86a8a8e0e3cd087083e080a8068a7ec Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Mon, 31 Aug 2020 22:23:03 -0700 Subject: [PATCH 71/84] wip pymanopt stuff --- brainiak/matnormal/srm_margs.py | 232 +++++++++++++++++++------------- 1 file changed, 141 insertions(+), 91 deletions(-) diff --git a/brainiak/matnormal/srm_margs.py b/brainiak/matnormal/srm_margs.py index 83305a404..27193e20e 100644 --- a/brainiak/matnormal/srm_margs.py +++ b/brainiak/matnormal/srm_margs.py @@ -1,26 +1,32 @@ import tensorflow as tf +import numpy as np + from pymanopt import Problem -from pymanopt.manifolds import Stiefel, Euclidean +from pymanopt.manifolds import Stiefel, Euclidean, Product from pymanopt.solvers import TrustRegions, ConjugateGradient + from sklearn.base import BaseEstimator + from brainiak.matnormal.covs import CovIdentity -import numpy as np from brainiak.matnormal.matnormal_likelihoods import ( matnorm_logp_marginal_col, matnorm_logp) -from tensorflow.contrib.opt import ScipyOptimizerInterface +from brainiak.matnormal.utils import make_val_and_grad + + import logging logger = logging.getLogger(__name__) + class MNSRM_OrthoW(BaseEstimator): """Probabilistic SRM, aka SRM with marginalization over S (and ortho W) """ def __init__(self, n_features=5, time_noise_cov=CovIdentity, space_noise_cov=CovIdentity, s_cov=CovIdentity, - optMethod="L-BFGS-B",optCtrl={}): + optMethod="L-BFGS-B", optCtrl={}): self.k = n_features @@ -30,47 +36,52 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.optCtrl, self.optMethod = optCtrl, optMethod - # create a tf session we reuse for this object - self.sess = tf.compat.v1.Session() - def _eye(self, x): return tf.linalg.tensor_diag(tf.ones((x), dtype=tf.float64)) - def _make_Q_op(self): - mean = self.X - self.b - tf.matmul(self.w, tf.tile(tf.expand_dims(self.s_prime, 0), [self.n, 1, 1]) ) + def Q_fun(self, Wlist, X): + W = tf.stack(Wlist) + print(W.shape) + mean = X - self.b - \ + 
tf.matmul(W, tf.tile(tf.expand_dims( + self.s_prime, 0), [self.n, 1, 1])) det_terms = -(self.v*self.n + self.k)*self.time_cov.logdet -\ (self.t*self.n)*self.space_cov.logdet -\ - self.t*self.marg_cov.logdet +\ - (self.t*self.v)*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) + self.t*self.marg_cov.logdet +\ + (self.t*self.v) * \ + tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) # used twice below - trace_t_t = tf.linalg.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) + trace_t_t = tf.linalg.trace(self.time_cov.solve(self.tcov_prime)) # covs don't support batch ops (yet!) (TODO): - x_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(a=mean[j])), - self.space_cov.Sigma_inv_x(mean[j]))*self.rhoprec[j] - for j in range(self.n)], axis=0)) + x_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(self.time_cov.solve(tf.transpose(a=mean[j])), + self.space_cov.solve(mean[j]))*self.rhoprec[j] + for j in range(self.n)], axis=0)) - w_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.w[j])), - self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] - for j in range(self.n)], axis=0)) * trace_t_t + w_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=W[j])), + self.space_cov.solve(W[j]))*self.rhoprec[j] + for j in range(self.n)], axis=0)) * trace_t_t - s_quad_form = -tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(a=self.s_prime)), - self.marg_cov.Sigma_inv_x(self.s_prime))) + s_quad_form = -tf.linalg.trace(tf.matmul(self.time_cov.solve(tf.transpose(a=self.s_prime)), + self.marg_cov.solve(self.s_prime))) - sig_trace_prod = -trace_t_t * tf.linalg.trace(self.marg_cov.Sigma_inv_x(self.scov_prime)) + sig_trace_prod = -trace_t_t * \ + tf.linalg.trace(self.marg_cov.solve(self.scov_prime)) - return 0.5 * (det_terms + x_quad_form + s_quad_form + w_quad_form + sig_trace_prod)#, det_terms, x_quad_form, s_quad_form, w_quad_form, sig_trace_prod + return 0.5 * (det_terms + x_quad_form + s_quad_form + w_quad_form + sig_trace_prod) - def make_estep_ops(self): + def estep(self): - tcov_prime = self.time_cov.Sigma - Xmb = self.X - self.b + tcov_prime = self.time_cov + Xmb = X - self.b - sprec_chol = tf.linalg.cholesky(self.marg_cov.Sigma_inv + tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose(a=self.w[j]), self.space_cov.Sigma_inv_x(self.w[j]))*self.rhoprec[j] for j in range(self.n)], axis=0)) + sprec_chol = tf.linalg.cholesky(self.marg_cov._prec + tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose( + a=self.w[j]), self.space_cov.solve(self.w[j]))*self.rhoprec[j] for j in range(self.n)], axis=0)) - wsig_x = tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose(a=self.w[j]), self.space_cov.Sigma_inv_x(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], axis=0) + wsig_x = tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose( + a=self.w[j]), self.space_cov.solve(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], axis=0) scov_prime = tf.linalg.cholesky_solve(sprec_chol, self._eye(self.k)) @@ -78,28 +89,54 @@ def make_estep_ops(self): return s_prime, scov_prime, tcov_prime - def make_mstep_b_op(self): + def mstep_b(self): - return tf.expand_dims(tf.reduce_sum(input_tensor=[self.time_cov.Sigma_inv_x(tf.transpose(a=self.X[j] - - tf.matmul(self.w[j],self.s_prime))) - for j in range(self.n)], axis=1) / - tf.reduce_sum(input_tensor=self.time_cov.Sigma_inv), -1) + return 
tf.expand_dims(tf.reduce_sum(input_tensor=[self.time_cov.solve(tf.transpose(a=X[j] - + tf.matmul(self.w[j], self.s_prime))) + for j in range(self.n)], axis=1) / + tf.reduce_sum(input_tensor=self.time_cov._prec), -1) - def make_mstep_rhoprec_op(self): + def mstep_rhoprec(self): - mean = self.X - self.b - tf.matmul(self.w, tf.tile(tf.expand_dims(self.s_prime,0), [self.n, 1, 1]) ) + mean = X - self.b - \ + tf.matmul(self.w, tf.tile(tf.expand_dims( + self.s_prime, 0), [self.n, 1, 1])) - mean_trace = tf.stack([tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x(tf.transpose(a=mean[j])), - self.space_cov.Sigma_inv_x(mean[j]))) for j in range(self.n)]) + mean_trace = tf.stack([tf.linalg.trace(tf.matmul(self.time_cov.solve(tf.transpose(a=mean[j])), + self.space_cov.solve(mean[j]))) for j in range(self.n)]) - trace_t_t = tf.linalg.trace(self.time_cov.Sigma_inv_x(self.tcov_prime)) + trace_t_t = tf.linalg.trace(self.time_cov.solve(self.tcov_prime)) w_trace = trace_t_t * tf.stack([tf.linalg.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.w[j])), - self.space_cov.Sigma_inv_x(self.w[j]))) for j in range(self.n)]) + self.space_cov.solve(self.w[j]))) for j in range(self.n)]) - rho_hat_unscaled = mean_trace + w_trace + rho_hat_unscaled = mean_trace + w_trace return (self.v*self.t) / rho_hat_unscaled +import numpy as np +voxels = 100 +samples = 500 +subjects = 2 +features = 3 + +# Create a Shared response S with K = 3 +theta = np.linspace(-4 * np.pi, 4 * np.pi, samples) +z = np.linspace(-2, 2, samples) +r = z**2 + 1 +x = r * np.sin(theta) +y = r * np.cos(theta) + +S = np.vstack((x, y, z)) + +X = [] +W = [] + +for subject in range(subjects): + Q, R = np.linalg.qr(np.random.random((voxels, features))) + W.append(Q) + X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples))) + +self = MNSRM_OrthoW() def fit(self, X, n_iter=10, y=None, w_cov=None): """ @@ -117,14 +154,15 @@ def fit(self, X, n_iter=10, y=None, w_cov=None): self.v, self.t = X[0].shape - self.X = tf.constant(X, name="X") + X = tf.stack(X, name="X") xsvd = [np.linalg.svd(x)for x in X] # parameters - self.b = tf.Variable(np.random.normal(size=(self.n, self.v,1)), name="b") + self.b = tf.Variable(np.random.normal( + size=(self.n, self.v, 1)), name="b") self.rhoprec = tf.Variable(np.ones(self.n), name="rhoprec") - wlist_np = [sv[0][:,:self.k] for sv in xsvd] + wlist_np = [sv[0][:, :self.k] for sv in xsvd] self.wlist = [tf.Variable(_w) for _w in wlist_np] self.w = tf.stack(self.wlist) self.space_cov = self.space_noise_cov_class(size=self.v) @@ -132,78 +170,90 @@ def fit(self, X, n_iter=10, y=None, w_cov=None): self.marg_cov = self.time_noise_cov_class(size=self.k) # sufficient statistics - self.s_prime = tf.Variable(np.average([sv[2][:self.k, :] for sv in xsvd], 0), dtype=tf.float64, name="s_prime") - self.scov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") - self.tcov_prime = tf.Variable(np.eye(self.t), name="wcov_prime") + self.s_prime = tf.Variable(np.average( + [sv[2][:self.k, :] for sv in xsvd], 0), dtype=tf.float64, name="s_prime") + self.scov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") + self.tcov_prime = tf.Variable(np.eye(self.t), name="wcov_prime") # self.Lambda = tf.diag(tf.ones(self.k, dtype=tf.float64)) * 1000 # just there for the q improvement assertion check - s_prime_op, scov_prime_op, tcov_prime_op = self.make_estep_ops() + # s_prime_op, scov_prime_op, tcov_prime_op = self.make_estep_ops() # can update these guys in closed form - b_op = self.make_mstep_b_op() - rhoprec_op = 
self.make_mstep_rhoprec_op() + # b_op = self.make_mstep_b_op() + # rhoprec_op = self.make_mstep_rhoprec_op() - q_op = self._make_Q_op() + # q_op = self._make_Q_op() - sigma_v_opt = ScipyOptimizerInterface(-q_op, - var_list=self.space_cov.get_optimize_vars(), - method=self.optMethod, - options=self.optCtrl) + # sigma_v_opt = ScipyOptimizerInterface(-q_op, + # var_list=self.space_cov.get_optimize_vars(), + # method=self.optMethod, + # options=self.optCtrl) - sigma_t_opt = ScipyOptimizerInterface(-q_op, - var_list=self.time_cov.get_optimize_vars(), - method=self.optMethod, - options=self.optCtrl) + # sigma_t_opt = ScipyOptimizerInterface(-q_op, + # var_list=self.time_cov.get_optimize_vars(), + # method=self.optMethod, + # options=self.optCtrl) - sigma_s_opt = ScipyOptimizerInterface(-q_op, - var_list=self.marg_cov.get_optimize_vars(), - method=self.optMethod, - options=self.optCtrl) + # sigma_s_opt = ScipyOptimizerInterface(-q_op, + # var_list=self.marg_cov.get_optimize_vars(), + # method=self.optMethod, + # options=self.optCtrl) - w_manifold = Stiefel(self.t, self.k) + w_manifold = Product([Stiefel(self.t, self.k) for i in range(self.n)]) # s_trp_manifold = Euclidean(self.t, self.k) - solver = ConjugateGradient() + # solver = ConjugateGradient() # this would be faster but need to work through some dtype wrangling with # the internals of pymanopt - # solver = TrustRegions() + solver = TrustRegions() + + @TensorFlow + def lossfn_Q(arg1, arg2): + print(arg1.shape) + print(arg2.shape) + return -self.Q_fun([arg1, arg2], X) + + egrad = lossfn_Q.compute_gradient() + egrad(*[w.numpy() for w in self.wlist]) + + ehess = lossfn_Q.compute_hessian_vector_product() + ehess(*[w.numpy() for w in self.wlist], *[np.ones(self.v) for i in range(self.n)]) - w_problems = [Problem(manifold=w_manifold, cost=-q_op, arg=_w, verbosity=0) for _w in self.wlist] + # val_and_grad = make_val_and_grad(lossfn_Q, self.wlist) + # x0 = pack_trainable_vars(self.train_variables) - # hacky hack hack to let us maintain state on the things we're not pymanopting - for i in range(self.n): - w_problems[i].backend._session = self.sess + # opt_results = minimize( + # fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, + # **self.optCtrl + # ) - self.sess.run(tf.compat.v1.global_variables_initializer()) - for em_iter in range(n_iter): - q_start = q_op.eval(session=self.sess) + w_problem = Problem(manifold=w_manifold, cost=lossfn_Q)# verbosity=0) for i in range(self.n)] + em_iter = 0 + # for em_iter in range(n_iter): + q_start = self.Q_fun(self.wlist, X) logger.info("Iter %i, Q at start %f" % (em_iter, q_start)) # ESTEP # compute all the terms with old vals - s_prime_new = s_prime_op.eval(session=self.sess) - tcov_prime_new = tcov_prime_op.eval(session=self.sess) - scov_prime_new = scov_prime_op.eval(session=self.sess) + s_prime_new, scov_prime_new, tcov_prime_new = self.estep() - # then update (since we reuse wcov_prime in computing w_prime) - self.s_prime.load(s_prime_new, session=self.sess) - self.scov_prime.load(scov_prime_new, session=self.sess) - self.tcov_prime.load(tcov_prime_new, session=self.sess) - - q_end_estep = q_op.eval(session=self.sess) + q_end_estep = self.Q_fun(self.wlist, X) logger.info("Iter %i, Q at estep end %f" % (em_iter, q_end_estep)) # MSTEP - # analytic parts: b and rho! 
that's sort of bad actually - self.b.load(b_op.eval(session=self.sess), session=self.sess) - rhoprec_new = rhoprec_op.eval(session=self.sess) + self.b = self.mstep_b() + rhoprec_new = self.mstep_rhoprec() # rhoprec_norm = tf.norm(rhoprec_new - self.rhoprec).eval(session=self.sess) / self.n - self.rhoprec.load(rhoprec_new, session=self.sess) - + # optimization parts: + + solver.solve(w_problem, x=[w.numpy() for w in self.wlist]) + + i = 0 for i in range(self.n): - new_w = solver.solve(w_problems[i], x=self.wlist[i].eval(session=self.sess)) + new_w = solver.solve( + w_problems[i], x=self.wlist[i].numpy()) self.wlist[i].load(new_w, session=self.sess) if self.space_noise_cov_class is not CovIdentity: @@ -224,13 +274,13 @@ def fit(self, X, n_iter=10, y=None, w_cov=None): self.s_ = self.s_prime.eval(session=self.sess) self.rho_ = 1/self.rhoprec.eval(session=self.sess) - def transform(self, X): - vprec = self.space_cov.Sigma_inv.eval(session=self.sess) + vprec = self.space_cov._prec.eval(session=self.sess) return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) def transform_orthow(self, X): # orthonormalize W - w_ortho = [w @ np.linalg.svd(w.T @ w)[0] / np.sqrt(np.linalg.svd(w.T @ w)[1]) for w in self.w_] - vprec = self.space_cov.Sigma_inv.eval(session=self.sess) - return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) \ No newline at end of file + w_ortho = [w @ np.linalg.svd( + w.T @ w)[0] / np.sqrt(np.linalg.svd(w.T @ w)[1]) for w in self.w_] + vprec = self.space_cov._prec.eval(session=self.sess) + return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) From 5aaf0755c7aa43b3a15f2581a35f6c3a5beb775f Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 24 Dec 2020 15:17:26 -0800 Subject: [PATCH 72/84] mn-srm ported to tf2 --- brainiak/matnormal/srm_margs.py | 262 +++++++++++--------------- tests/matnormal/test_matnormal_srm.py | 72 +++++++ 2 files changed, 186 insertions(+), 148 deletions(-) create mode 100644 tests/matnormal/test_matnormal_srm.py diff --git a/brainiak/matnormal/srm_margs.py b/brainiak/matnormal/srm_margs.py index 27193e20e..f91e6d285 100644 --- a/brainiak/matnormal/srm_margs.py +++ b/brainiak/matnormal/srm_margs.py @@ -1,27 +1,32 @@ -import tensorflow as tf +import inspect +import logging +import warnings + +from brainiak.matnormal.covs import CovIdentity +from brainiak.matnormal.utils import make_val_and_grad, pack_trainable_vars + import numpy as np from pymanopt import Problem -from pymanopt.manifolds import Stiefel, Euclidean, Product -from pymanopt.solvers import TrustRegions, ConjugateGradient +from pymanopt.function import TensorFlow +from pymanopt.manifolds import Product, Stiefel +from pymanopt.solvers import TrustRegions -from sklearn.base import BaseEstimator - -from brainiak.matnormal.covs import CovIdentity -from brainiak.matnormal.matnormal_likelihoods import ( - matnorm_logp_marginal_col, - matnorm_logp) -from brainiak.matnormal.utils import make_val_and_grad +from scipy.optimize import minimize +from sklearn.base import BaseEstimator -import logging +import tensorflow as tf logger = logging.getLogger(__name__) -class MNSRM_OrthoW(BaseEstimator): - """Probabilistic SRM, aka SRM with marginalization over S (and ortho W) +class MNSRM(BaseEstimator): + """Probabilistic SRM, aka SRM with marginalization over S (and + orthonormal W). This generalizes SRM (Chen et al. 
2015) by allowing + arbitrary kronecker-structured residual covariance. Inference is + performed by ECM algorithm. """ def __init__(self, n_features=5, time_noise_cov=CovIdentity, @@ -36,12 +41,11 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.optCtrl, self.optMethod = optCtrl, optMethod - def _eye(self, x): - return tf.linalg.tensor_diag(tf.ones((x), dtype=tf.float64)) + def Q_fun(self, W, X): + """ + Q function for ECM algorithm + """ - def Q_fun(self, Wlist, X): - W = tf.stack(Wlist) - print(W.shape) mean = X - self.b - \ tf.matmul(W, tf.tile(tf.expand_dims( self.s_prime, 0), [self.n, 1, 1])) @@ -72,34 +76,44 @@ def Q_fun(self, Wlist, X): return 0.5 * (det_terms + x_quad_form + s_quad_form + w_quad_form + sig_trace_prod) - def estep(self): + def estep(self, X): + """ + Compute expectation of the log posterior density (aka complete data log-likelihood) + for ECM. + """ tcov_prime = self.time_cov Xmb = X - self.b sprec_chol = tf.linalg.cholesky(self.marg_cov._prec + tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose( - a=self.w[j]), self.space_cov.solve(self.w[j]))*self.rhoprec[j] for j in range(self.n)], axis=0)) + a=self.W[j]), self.space_cov.solve(self.W[j]))*self.rhoprec[j] for j in range(self.n)], axis=0)) wsig_x = tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose( - a=self.w[j]), self.space_cov.solve(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], axis=0) + a=self.W[j]), self.space_cov.solve(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], axis=0) - scov_prime = tf.linalg.cholesky_solve(sprec_chol, self._eye(self.k)) + scov_prime = tf.linalg.cholesky_solve( + sprec_chol, tf.eye(self.k, dtype=tf.float64)) s_prime = tf.linalg.cholesky_solve(sprec_chol, wsig_x) - return s_prime, scov_prime, tcov_prime + return s_prime, scov_prime, tcov_prime._cov - def mstep_b(self): + def mstep_b(self, X): + """ + Update b (intercept term) as part of M-step. + """ - return tf.expand_dims(tf.reduce_sum(input_tensor=[self.time_cov.solve(tf.transpose(a=X[j] - - tf.matmul(self.w[j], self.s_prime))) + return tf.expand_dims(tf.reduce_sum(input_tensor=[self.time_cov.solve(tf.transpose(a=X[j] - tf.matmul(self.W[j], self.s_prime))) for j in range(self.n)], axis=1) / tf.reduce_sum(input_tensor=self.time_cov._prec), -1) - def mstep_rhoprec(self): + def mstep_rhoprec(self, X): + """ + Update rho^-1 (subject-wise precision scalers) as part of M-step. 
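+
+        The closed form implemented below is, for each subject j with
+        residual M_j = X_j - b_j - W_j @ s_prime,
+
+            rhoprec_j = v * t / (tr[St^-1 M_j' Sv^-1 M_j]
+                                 + tr[St^-1 tcov_prime]
+                                 * tr[scov_prime W_j' Sv^-1 W_j])
+
+        where St and Sv are the time and space residual covariances.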
+ """ mean = X - self.b - \ - tf.matmul(self.w, tf.tile(tf.expand_dims( + tf.matmul(self.W, tf.tile(tf.expand_dims( self.s_prime, 0), [self.n, 1, 1])) mean_trace = tf.stack([tf.linalg.trace(tf.matmul(self.time_cov.solve(tf.transpose(a=mean[j])), @@ -107,47 +121,32 @@ def mstep_rhoprec(self): trace_t_t = tf.linalg.trace(self.time_cov.solve(self.tcov_prime)) - w_trace = trace_t_t * tf.stack([tf.linalg.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.w[j])), - self.space_cov.solve(self.w[j]))) for j in range(self.n)]) + w_trace = trace_t_t * tf.stack([tf.linalg.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.W[j])), + self.space_cov.solve(self.W[j]))) for j in range(self.n)]) rho_hat_unscaled = mean_trace + w_trace return (self.v*self.t) / rho_hat_unscaled -import numpy as np -voxels = 100 -samples = 500 -subjects = 2 -features = 3 -# Create a Shared response S with K = 3 -theta = np.linspace(-4 * np.pi, 4 * np.pi, samples) -z = np.linspace(-2, 2, samples) -r = z**2 + 1 -x = r * np.sin(theta) -y = r * np.cos(theta) - -S = np.vstack((x, y, z)) - -X = [] -W = [] - -for subject in range(subjects): - Q, R = np.linalg.qr(np.random.random((voxels, features))) - W.append(Q) - X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples))) - -self = MNSRM_OrthoW() - - def fit(self, X, n_iter=10, y=None, w_cov=None): + def fit(self, X, n_iter=10, y=None, w_cov=None, svd_init=True): """ - find W marginalizing S + Fit SRM by ECM marginalizing over S. Parameters ---------- X: 2d array Brain data matrix (voxels by TRs). Y in the math n_iter: int, default=10 - Number of iterations to run + Number of ECM iterations to run + y: None + Ignored (just here for sklearn API compatibility) + w_cov : CovBase, default = CovIdentity + Prior covariance of the columns of W. + svd_init : bool, default=True + If true, initialize to the W_i left singular vectors of + X_i and S to the average of the right singular vectors + over all subjects. If false, initialize to random orthonormal + matrices. 
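+
+        Examples
+        --------
+        A minimal sketch on synthetic data (sizes and iteration count are
+        illustrative only, mirroring the unit test for this class)::
+
+            import numpy as np
+            X = [np.random.normal(size=(10, 50)) for _ in range(2)]
+            model = MNSRM(n_features=3)
+            model.fit(X, n_iter=5)
+            model.s_      # shared response, shape (3, 50)
+            model.w_[0]   # first subject's basis, shape (10, 3)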
""" self.n = len(X) @@ -156,131 +155,98 @@ def fit(self, X, n_iter=10, y=None, w_cov=None): X = tf.stack(X, name="X") - xsvd = [np.linalg.svd(x)for x in X] + if svd_init: + xsvd = [np.linalg.svd(x) for x in X] + else: + xsvd = [np.linalg.svd(np.random.normal( + size=(self.v, self.t))) for i in range(self.n)] + + w_init = [sv[0][:, :self.k] for sv in xsvd] + s_init = np.average([sv[2][:self.k, :] for sv in xsvd], 0) # parameters self.b = tf.Variable(np.random.normal( size=(self.n, self.v, 1)), name="b") self.rhoprec = tf.Variable(np.ones(self.n), name="rhoprec") - wlist_np = [sv[0][:, :self.k] for sv in xsvd] - self.wlist = [tf.Variable(_w) for _w in wlist_np] - self.w = tf.stack(self.wlist) + self.W = tf.Variable(tf.stack([_w for _w in w_init])) self.space_cov = self.space_noise_cov_class(size=self.v) self.time_cov = self.time_noise_cov_class(size=self.t) self.marg_cov = self.time_noise_cov_class(size=self.k) # sufficient statistics - self.s_prime = tf.Variable(np.average( - [sv[2][:self.k, :] for sv in xsvd], 0), dtype=tf.float64, name="s_prime") + self.s_prime = tf.Variable(s_init, dtype=tf.float64, name="s_prime") self.scov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") self.tcov_prime = tf.Variable(np.eye(self.t), name="wcov_prime") - # self.Lambda = tf.diag(tf.ones(self.k, dtype=tf.float64)) * 1000 # just there for the q improvement assertion check - - # s_prime_op, scov_prime_op, tcov_prime_op = self.make_estep_ops() - - # can update these guys in closed form - # b_op = self.make_mstep_b_op() - # rhoprec_op = self.make_mstep_rhoprec_op() - - # q_op = self._make_Q_op() - - # sigma_v_opt = ScipyOptimizerInterface(-q_op, - # var_list=self.space_cov.get_optimize_vars(), - # method=self.optMethod, - # options=self.optCtrl) + # Pymanopt setup - # sigma_t_opt = ScipyOptimizerInterface(-q_op, - # var_list=self.time_cov.get_optimize_vars(), - # method=self.optMethod, - # options=self.optCtrl) + # now we fool pymanopt into thinking we prepicked + # number of args even though we use varargs + def wrapped_Q(*args): + return -self.Q_fun(args, X) - # sigma_s_opt = ScipyOptimizerInterface(-q_op, - # var_list=self.marg_cov.get_optimize_vars(), - # method=self.optMethod, - # options=self.optCtrl) + sig = inspect.signature(wrapped_Q) + newparams = [inspect.Parameter( + f"w_{i}", inspect.Parameter.POSITIONAL_ONLY) for i in range(self.n)] + newsig = sig.replace(parameters=newparams) + wrapped_Q.__signature__ = newsig + lossfn_Q = TensorFlow(wrapped_Q) - w_manifold = Product([Stiefel(self.t, self.k) for i in range(self.n)]) - # s_trp_manifold = Euclidean(self.t, self.k) - # solver = ConjugateGradient() - # this would be faster but need to work through some dtype wrangling with - # the internals of pymanopt - solver = TrustRegions() + w_manifold = Product([Stiefel(self.v, self.k) for i in range(self.n)]) + solver = TrustRegions(logverbosity=0) + w_problem = Problem(manifold=w_manifold, cost=lossfn_Q) - @TensorFlow - def lossfn_Q(arg1, arg2): - print(arg1.shape) - print(arg2.shape) - return -self.Q_fun([arg1, arg2], X) - - egrad = lossfn_Q.compute_gradient() - egrad(*[w.numpy() for w in self.wlist]) - - ehess = lossfn_Q.compute_hessian_vector_product() - ehess(*[w.numpy() for w in self.wlist], *[np.ones(self.v) for i in range(self.n)]) - - # val_and_grad = make_val_and_grad(lossfn_Q, self.wlist) - # x0 = pack_trainable_vars(self.train_variables) - - # opt_results = minimize( - # fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, - # **self.optCtrl - # ) - - - w_problem = 
Problem(manifold=w_manifold, cost=lossfn_Q)# verbosity=0) for i in range(self.n)] - em_iter = 0 - # for em_iter in range(n_iter): - q_start = self.Q_fun(self.wlist, X) + for em_iter in range(n_iter): + q_start = self.Q_fun(self.W, X) logger.info("Iter %i, Q at start %f" % (em_iter, q_start)) # ESTEP # compute all the terms with old vals - s_prime_new, scov_prime_new, tcov_prime_new = self.estep() + s_prime_new, scov_prime_new, _ = self.estep(X) + self.s_prime.assign(s_prime_new, read_value=False) + self.scov_prime.assign(scov_prime_new, read_value=False) + # don't assign tcov since it is not updated in margS SRM - q_end_estep = self.Q_fun(self.wlist, X) + q_end_estep = self.Q_fun(self.W, X) logger.info("Iter %i, Q at estep end %f" % (em_iter, q_end_estep)) # MSTEP - self.b = self.mstep_b() - rhoprec_new = self.mstep_rhoprec() - # rhoprec_norm = tf.norm(rhoprec_new - self.rhoprec).eval(session=self.sess) / self.n - # optimization parts: + # closed form parts + self.b = self.mstep_b(X) + self.rhoprec = self.mstep_rhoprec(X) - solver.solve(w_problem, x=[w.numpy() for w in self.wlist]) + # optimization parts: + # Stiefel manifold for orthonormal W + new_w = solver.solve( + w_problem, x=[self.W[i].numpy() for i in range(self.n)]) - i = 0 - for i in range(self.n): - new_w = solver.solve( - w_problems[i], x=self.wlist[i].numpy()) - self.wlist[i].load(new_w, session=self.sess) + self.W.assign(new_w, read_value=False) - if self.space_noise_cov_class is not CovIdentity: - sigma_v_opt.minimize(session=self.sess) + # L-BFGS for residual covs + for cov in [self.space_cov, self.time_cov, self.marg_cov]: + if len(cov.get_optimize_vars()) > 0: + def lossfn(Q): return -self.Q_fun(self.W, X) + val_and_grad = make_val_and_grad( + lossfn, cov.get_optimize_vars()) - if self.time_noise_cov_class is not CovIdentity: - sigma_t_opt.minimize(session=self.sess) + x0 = pack_trainable_vars(cov.get_optimize_vars()) - if self.marg_cov_class is not CovIdentity: - sigma_s_opt.minimize(session=self.sess) + opt_results = minimize( + fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, + **self.optCtrl + ) + assert opt_results.success, "L-BFGS for covariances failed!" - q_end_mstep = q_op.eval(session=self.sess) + q_end_mstep = self.Q_fun(self.W, X) logger.info("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) - assert q_end_estep >= q_start - assert q_end_mstep >= q_end_estep + assert q_end_estep >= q_start, "Q increased in E-step!" + assert q_end_mstep >= q_end_estep, "Q increased in M-step!" 
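+            # Sanity check: Q should not decrease across the E- or M-step;
+            # a drop here usually indicates a numerical problem or a bug in
+            # one of the conditional updates.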
- self.w_ = self.w.eval(session=self.sess) - self.s_ = self.s_prime.eval(session=self.sess) - self.rho_ = 1/self.rhoprec.eval(session=self.sess) + self.w_ = [self.W[i].numpy() for i in range(self.n)] + self.s_ = self.s_prime.numpy() + self.rho_ = 1/self.rhoprec.numpy() def transform(self, X): - vprec = self.space_cov._prec.eval(session=self.sess) - return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) - - def transform_orthow(self, X): - # orthonormalize W - w_ortho = [w @ np.linalg.svd( - w.T @ w)[0] / np.sqrt(np.linalg.svd(w.T @ w)[1]) for w in self.w_] - vprec = self.space_cov._prec.eval(session=self.sess) + vprec = self.space_cov._prec.numpy() return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) diff --git a/tests/matnormal/test_matnormal_srm.py b/tests/matnormal/test_matnormal_srm.py new file mode 100644 index 000000000..f89d35c25 --- /dev/null +++ b/tests/matnormal/test_matnormal_srm.py @@ -0,0 +1,72 @@ +from brainiak.matnormal.srm_margs import MNSRM + +import numpy as np + +from scipy.stats import pearsonr + +import tensorflow as tf + + +def test_mnsrm_margs(): + np.random.seed(1) + tf.random.set_seed(1) + voxels = 10 + samples = 50 + subjects = 2 + features = 3 + + # Create a Shared response S with K = 3 + theta = np.linspace(-4 * np.pi, 4 * np.pi, samples) + z = np.linspace(-2, 2, samples) + r = z**2 + 1 + x = r * np.sin(theta) + y = r * np.cos(theta) + + S = np.vstack((x, y, z)) + + X = [] + W = [] + + for subject in range(subjects): + Q, R = np.linalg.qr(np.random.random((voxels, features))) + W.append(Q) + X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples))) + + model = MNSRM(n_features=features) + assert model, "Cannot instantiate MNSRM!" + + # test that it works without svd init + model.fit(X, n_iter=5, svd_init=False) + + assert model.s_.shape == (features, samples), "S wrong shape!" + + for i in range(subjects): + assert model.w_[i].shape == (voxels, features), f"W[{i}] wrong shape!" + + assert model.rho_.shape[0] == subjects, "rho wrong shape!" + + # check that reconstruction isn't terrible + reconstructions = [model.w_[i] @ model.s_ for i in range(subjects)] + corrs = [pearsonr(r.flatten(), x.flatten())[0] + for r, x in zip(reconstructions, X)] + for corr in corrs: + assert corr > 0.9, "Reconstruction with svd_init=False is bad! " + + model = MNSRM(n_features=features) + + # test that it works with svd init + model.fit(X, n_iter=5, svd_init=True) + + assert model.s_.shape == (features, samples), "S wrong shape!" + + for i in range(subjects): + assert model.w_[i].shape == (voxels, features), f"W[{i}] wrong shape!" + + assert model.rho_.shape[0] == subjects, "rho wrong shape!" + + # check that reconstruction isn't terrible + reconstructions = [model.w_[i] @ model.s_ for i in range(subjects)] + corrs = [pearsonr(r.flatten(), x.flatten())[0] + for r, x in zip(reconstructions, X)] + for corr in corrs: + assert corr > 0.9, "Reconstruction svd_init=True is bad! 
" From fee2c8db8c48d8ce20ad636d234f191d2c73bf63 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Thu, 24 Dec 2020 15:19:56 -0800 Subject: [PATCH 73/84] consistent naming --- brainiak/matnormal/{dpmnsrm.py => srm_margw.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename brainiak/matnormal/{dpmnsrm.py => srm_margw.py} (100%) diff --git a/brainiak/matnormal/dpmnsrm.py b/brainiak/matnormal/srm_margw.py similarity index 100% rename from brainiak/matnormal/dpmnsrm.py rename to brainiak/matnormal/srm_margw.py From 2320c038df0119cf984a06dab601a4bfe60cc9f6 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 25 Dec 2020 17:25:51 -0800 Subject: [PATCH 74/84] works now --- brainiak/matnormal/srm_margw.py | 439 ++++++++++++++------------------ 1 file changed, 191 insertions(+), 248 deletions(-) diff --git a/brainiak/matnormal/srm_margw.py b/brainiak/matnormal/srm_margw.py index 171212012..96f9137db 100644 --- a/brainiak/matnormal/srm_margw.py +++ b/brainiak/matnormal/srm_margw.py @@ -1,7 +1,7 @@ import tensorflow as tf from pymanopt import Problem from pymanopt.manifolds import Stiefel -from pymanopt.solvers import ConjugateGradient +from pymanopt.solvers import TrustRegions from sklearn.base import BaseEstimator from brainiak.matnormal.covs import (CovIdentity, CovScaleMixin, @@ -9,15 +9,20 @@ import numpy as np from brainiak.matnormal.matnormal_likelihoods import ( matnorm_logp_marginal_col) -from tensorflow.contrib.opt import ScipyOptimizerInterface +from brainiak.matnormal.utils import pack_trainable_vars, make_val_and_grad import logging - +from pymanopt.function import TensorFlow +from scipy.optimize import minimize logger = logging.getLogger(__name__) class DPMNSRM(BaseEstimator): - """Dual probabilistic SRM, aka SRM with marginalization over W + """Probabilistic SRM, aka SRM with marginalization over W (and optionally, + orthonormal S). In contrast to SRM (Chen et al. 2015), this estimates + far fewer parameters due to the W integral, and includes support for + arbitrary kronecker-structured residual covariance. Inference is + performed by ECM algorithm. """ def __init__(self, n_features=5, time_noise_cov=CovIdentity, @@ -37,8 +42,12 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, logger.warn("Gaussian S means w_cov can be I w.l.o.g., using\ more general covs not recommended") else: - logger.error("Unknown s_constraint! Defaulting to orthonormal.") - self.s_constraint = "ortho" + raise RuntimeError( + f"Unknown s_constraint! Expected 'ortho' or 'gaussian', got {s_constraint}!") + + if algorithm not in ["ECM", "ECME"]: + raise RuntimeError( + f"Unknown algorithm! 
Expected 'ECM' or 'ECME', got {algorithm}!") self.time_noise_cov_class = time_noise_cov self.space_noise_cov_class = space_noise_cov @@ -46,86 +55,83 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.optCtrl, self.optMethod = optCtrl, optMethod - # create a tf session we reuse for this object - self.sess = tf.compat.v1.Session() - - def _eye(self, x): - return tf.linalg.tensor_diag(tf.ones((x), dtype=tf.float64)) - - def _make_logp_op(self): - """ MatnormSRM Log-likelihood""" + def logp_margw(self, X): + """ MatnormSRM Log-likelihood with marginal""" subj_space_covs = [CovScaleMixin(base_cov=self.space_cov, scale=1/self.rhoprec[j]) for j in range(self.n)] if self.marg_cov_class is CovIdentity: return tf.reduce_sum( - input_tensor=[matnorm_logp_marginal_col(self.X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=self.S, - marg_cov=CovIdentity(size=self.k)) - for j in range(self.n)], name="lik_logp") + input_tensor=[matnorm_logp_marginal_col(X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=self.S, + marg_cov=CovIdentity(size=self.k)) + for j in range(self.n)], name="lik_logp") elif self.marg_cov_class is CovUnconstrainedCholesky: return tf.reduce_sum( - input_tensor=[matnorm_logp_marginal_col(self.X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=tf.matmul( + input_tensor=[matnorm_logp_marginal_col(X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=tf.matmul( self.marg_cov.L, self.S), marg_cov=CovIdentity(size=self.k)) - for j in range(self.n)], name="lik_logp") + for j in range(self.n)], name="lik_logp") else: logger.warn("ECME with cov that is not identity or unconstrained may\ yield numerical instabilities! Use ECM for now.") return tf.reduce_sum( - input_tensor=[matnorm_logp_marginal_col(self.X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=self.S, - marg_cov=self.marg_cov) - for j in range(self.n)], name="lik_logp") - - def _make_Q_op(self): - - mean = self.X - self.b - tf.matmul(self.w_prime, - tf.tile(tf.expand_dims(self.S, 0), - [self.n, 1, 1])) + input_tensor=[matnorm_logp_marginal_col(X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=self.S, + marg_cov=self.marg_cov) + for j in range(self.n)], name="lik_logp") + + def Q_fun_margw(self, Strp, X): + # shorthands for readability + kt = self.k * self.t + nv = self.n * self.v + + mean = X - self.b - tf.matmul(self.w_prime, + tf.tile(tf.expand_dims(self.S, 0), + [self.n, 1, 1])) # covs don't support batch ops (yet!) 
(TODO): x_quad_form = -tf.linalg.trace(tf.reduce_sum( - input_tensor=[tf.matmul(self.time_cov.Sigma_inv_x( + input_tensor=[tf.matmul(self.time_cov.solve( tf.transpose(a=mean[j])), - self.space_cov.Sigma_inv_x(mean[j])) * + self.space_cov.solve(mean[j])) * self.rhoprec[j] for j in range(self.n)], axis=0)) w_quad_form = -tf.linalg.trace(tf.reduce_sum( - input_tensor=[tf.matmul(self.marg_cov.Sigma_inv_x( + input_tensor=[tf.matmul(self.marg_cov.solve( tf.transpose(a=self.w_prime[j])), - self.space_cov.Sigma_inv_x(self.w_prime[j])) * + self.space_cov.solve(self.w_prime[j])) * self.rhoprec[j] for j in range(self.n)], axis=0)) if self.s_constraint == "gaussian": s_quad_form = - \ - tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x( + tf.linalg.trace(tf.matmul(self.time_cov.solve( tf.transpose(a=self.S)), self.S)) det_terms = -(self.v*self.n+self.k) * self.time_cov.logdet -\ - (self.k+self.t)*self.n*self.space_cov.logdet +\ - (self.k+self.t)*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ - (self.n*self.v)*self.marg_cov.logdet + kt*self.n*self.space_cov.logdet +\ + kt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ + nv*self.marg_cov.logdet else: s_quad_form = 0 det_terms = -(self.v*self.n)*self.time_cov.logdet -\ - (self.k+self.t)*self.n*self.space_cov.logdet +\ - (self.k+self.t)*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ - (self.n*self.v)*self.marg_cov.logdet + kt*self.n*self.space_cov.logdet +\ + kt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ + nv*self.marg_cov.logdet trace_prod = -tf.reduce_sum(input_tensor=self.rhoprec / self.rhoprec_prime) *\ - tf.linalg.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ - (tf.linalg.trace(tf.matmul(self.wcov_prime, self.marg_cov.Sigma_inv + - tf.matmul(self.S, self.time_cov.Sigma_inv_x( - tf.transpose(a=self.S)))))) + tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) *\ + (tf.linalg.trace(tf.matmul(self.wcov_prime, self.marg_cov._prec + + tf.matmul(self.S, self.time_cov.solve( + tf.transpose(a=self.S)))))) return 0.5 * (det_terms + x_quad_form + @@ -133,112 +139,139 @@ def _make_Q_op(self): trace_prod + s_quad_form) - def make_estep_ops(self): + def estep_margw(self, X): - rhoprec_prime = self.rhoprec - vcov_prime = self.space_cov.Sigma - wchol = tf.linalg.cholesky(self.marg_cov.Sigma_inv + - tf.matmul(self.S, self.time_cov.Sigma_inv_x( - tf.transpose(a=self.S)))) + wchol = tf.linalg.cholesky(self.marg_cov._prec + + tf.matmul(self.S, self.time_cov.solve( + tf.transpose(a=self.S)))) - wcov_prime = tf.linalg.cholesky_solve(wchol, self._eye(self.k)) + wcov_prime = tf.linalg.cholesky_solve(wchol, tf.eye(self.k, dtype=tf.float64)) - stacked_rhs = tf.tile(tf.expand_dims(self.time_cov.Sigma_inv_x( + stacked_rhs = tf.tile(tf.expand_dims(self.time_cov.solve( tf.transpose(a=tf.linalg.cholesky_solve(wchol, self.S))), 0), [self.n, 1, 1]) w_prime = tf.matmul(self.X-self.b, stacked_rhs) - return w_prime, rhoprec_prime, vcov_prime, wcov_prime + # rhoprec doesn't change + # vcov doesn't change + self.w_prime.assign(w_prime) + self.wcov_prime.assign(wcov_prime) - def make_mstep_b_op(self): + + def mstep_b_margw(self, X): return tf.expand_dims(tf.reduce_sum( - input_tensor=[self.time_cov.Sigma_inv_x(tf.transpose(a=self.X[j] - - tf.matmul(self.w_prime[j], self.S))) - for j in range(self.n)], axis=1) / - tf.reduce_sum(input_tensor=self.time_cov.Sigma_inv), -1) + input_tensor=[self.time_cov.solve(tf.transpose(a=X[j] - + tf.matmul(self.w_prime[j], self.S))) + for j in range(self.n)], axis=1) / + 
tf.reduce_sum(input_tensor=self.time_cov._prec), -1) - def make_mstep_S_op(self): + def mstep_S_nonortho(self, X): wtw = tf.reduce_sum( input_tensor=[tf.matmul(self.w_prime[j], - self.space_cov.Sigma_inv_x(self.w_prime[j]), - transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], axis=0) + self.space_cov.solve( + self.w_prime[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], axis=0) wtx = tf.reduce_sum( input_tensor=[tf.matmul(self.w_prime[j], - self.space_cov.Sigma_inv_x(self.X[j]-self.b[j]), - transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], axis=0) + self.space_cov.solve( + X[j]-self.b[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], axis=0) - return tf.linalg.solve(wtw + - tf.reduce_sum(input_tensor=self.rhoprec / - self.rhoprec_prime) * - tf.linalg.trace(self.space_cov.Sigma_inv_x( - self.vcov_prime)) * - self.wcov_prime + self._eye(self.k), wtx) + return tf.linalg.solve(wtw + tf.reduce_sum(input_tensor=self.rhoprec_prime / self.rhoprec) * + tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) * + self.wcov_prime + tf.eye(self.k, dtype=tf.float64), wtx) - def make_mstep_rhoprec_op(self): + def mstep_rhoprec_margw(self, X): - mean = self.X - self.b -\ + mean = X - self.b -\ tf.matmul(self.w_prime, tf.tile(tf.expand_dims(self.S, 0), [self.n, 1, 1])) mean_trace = tf.stack( - [tf.linalg.trace(tf.matmul(self.time_cov.Sigma_inv_x( + [tf.linalg.trace(tf.matmul(self.time_cov.solve( tf.transpose(a=mean[j])), - self.space_cov.Sigma_inv_x(mean[j]))) for j in range(self.n)]) + self.space_cov.solve(mean[j]))) for j in range(self.n)]) w_trace = tf.stack( - [tf.linalg.trace(tf.matmul(self.marg_cov.Sigma_inv_x( + [tf.linalg.trace(tf.matmul(self.marg_cov.solve( tf.transpose(a=self.w_prime[j])), - self.space_cov.Sigma_inv_x(self.w_prime[j]))) + self.space_cov.solve(self.w_prime[j]))) for j in range(self.n)]) shared_term = (1/self.rhoprec_prime) *\ - tf.linalg.trace(self.space_cov.Sigma_inv_x(self.vcov_prime)) *\ + tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) *\ tf.linalg.trace(tf.matmul(self.wcov_prime, - self.marg_cov.Sigma_inv + - tf.matmul(self.S, - self.time_cov.Sigma_inv_x( - tf.transpose(a=self.S))))) + self.marg_cov._prec + + tf.matmul(self.S, + self.time_cov.solve( + tf.transpose(a=self.S))))) rho_hat_unscaled = mean_trace + w_trace + shared_term return (self.v*(self.k+self.t)) / rho_hat_unscaled - def _init_vars(self, X): + def mstep_margw(self, X): + # closed form parts + self.b = self.mstep_b_margw(X) + self.rhoprec = self.mstep_rhoprec_margw(X) + + # optimization parts: + # Stiefel manifold for orthonormal S (if ortho_s) + if self.s_constraint == "ortho": + new_Strp = self.solver.solve(self.problem, x=self.S.numpy().T) + self.S.assign(new_Strp.T) + else: + # if it's not ortho, it's just least squares update + self.S.assign(self.mstep_S_nonortho(X)) + # L-BFGS for residual covs + for cov in [self.space_cov, self.time_cov, self.marg_cov]: + if len(cov.get_optimize_vars()) > 0: + def lossfn(Q): return -self.Q_fun_margw(self.S, X) + val_and_grad = make_val_and_grad( + lossfn, cov.get_optimize_vars()) + + x0 = pack_trainable_vars(cov.get_optimize_vars()) + + opt_results = minimize( + fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, + **self.optCtrl + ) + assert opt_results.success, "L-BFGS for covariances failed!" 
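+
+    # _init_vars (below) sets up the model parameters (b, rhoprec, the
+    # space/time/marginal covariances, and S) and the posterior sufficient
+    # statistics (w_prime, rhoprec_prime, wcov_prime, vcov_prime); when
+    # svd_init is set, w_prime and S are seeded from an SVD of the data,
+    # otherwise from SVDs of random matrices.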
+ + def _init_vars(self, X, svd_init=False): self.n = len(X) self.v, self.t = X[0].shape self.X = tf.constant(X, name="X") - xsvd = [np.linalg.svd(x) for x in X] + if svd_init: + xinit = [np.linalg.svd(x) for x in X] + else: + xinit = [np.linalg.svd(np.random.normal( + size=(self.v, self.t))) for i in range(self.n)] # parameters self.b = tf.Variable(np.random.normal(size=(self.n, self.v, 1)), name="b") self.rhoprec = tf.Variable(np.ones(self.n), name="rhoprec") + self.space_cov = self.space_noise_cov_class(size=self.v) + self.time_cov = self.time_noise_cov_class(size=self.t) + self.marg_cov = self.marg_cov_class(size=self.k) + self.S = tf.Variable(np.average([s[2][:self.k, :] for s in xinit],0), + dtype=tf.float64, name="S") - self.w_prime = tf.Variable(np.array([s[0][:, :self.k] for s in xsvd]), + # sufficient statistics + self.w_prime = tf.Variable(np.array([s[0][:, :self.k] for s in xinit]), name="w_prime") self.rhoprec_prime = tf.Variable(np.ones(self.n), name="rhoprec_prime") self.wcov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") self.vcov_prime = tf.Variable(np.eye(self.v), name="vcov_prime") - self.space_cov = self.space_noise_cov_class(size=self.v) - self.time_cov = self.time_noise_cov_class(size=self.t) - self.marg_cov = self.marg_cov_class(size=self.k) - - # we need Strp to be the actual param because stiefel is on the rows, - # and might as well initialize with SVD - - self.S_trp = tf.Variable(np.average([s[2][:self.k, :] for s in xsvd], - 0).T, - dtype=tf.float64, name="S_transpose") - self.S = tf.transpose(a=self.S_trp) - def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): """ find S marginalizing W @@ -255,167 +288,77 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): X = np.array(X).astype(np.float64) self._init_vars(X) - (w_prime_op, - rhoprec_prime_op, - vcov_prime_op, - wcov_prime_op) = self.make_estep_ops() - - b_op = self.make_mstep_b_op() - rhoprec_op = self.make_mstep_rhoprec_op() - - s_op = self.make_mstep_S_op() - if self.algorithm == "ECME": - loss_op = -self._make_logp_op() + def loss(x): return -self.logp_margw(X) loss_name = "-Marginal Lik" elif self.algorithm == "ECM": - loss_op = -self._make_Q_op() + def loss(x): return -self.Q_fun_margw(X) loss_name = "-ELPD (Q)" - else: - logger.error("Unknown algorithm %s!" 
% self.algorithm) - sigma_v_opt = ScipyOptimizerInterface(loss_op, - var_list=self.space_cov.get_optimize_vars(), - method=self.optMethod, - options=self.optCtrl) + def wrapped_Q(Strp, X): + return -self.Q_fun_margw(Strp, X) - sigma_t_opt = ScipyOptimizerInterface(loss_op, - var_list=self.time_cov.get_optimize_vars(), - method=self.optMethod, - options=self.optCtrl) - - sigma_w_opt = ScipyOptimizerInterface(loss_op, - var_list=self.marg_cov.get_optimize_vars(), - method=self.optMethod, - options=self.optCtrl) + lossfn_Q = TensorFlow(wrapped_Q) s_trp_manifold = Stiefel(self.t, self.k) - solver = ConjugateGradient() - - problem = Problem(manifold=s_trp_manifold, cost=loss_op, - arg=self.S_trp, verbosity=1) + self.solver = TrustRegions(logverbosity=0) + self.problem = Problem(manifold=s_trp_manifold, cost=lossfn_Q) - # hacky hack hack to let us maintain state on the things - # we're not pymanopting - problem.backend._session = self.sess + for em_iter in range(max_iter): - self.sess.run(tf.compat.v1.global_variables_initializer()) - - converged = False - for i in range(max_iter): - loss_start = loss_op.eval(session=self.sess) - logger.info("Iter %i, %s at start %f" % (i, loss_name, loss_start)) + q_start = self.Q_fun_margw(self.S, X) + logger.info(f"Iter {em_iter}, {loss_name} at start {q_start}") + print(f"Iter {em_iter}, {loss_name} at start {q_start}") # ESTEP - # compute all the terms with old vals - w_prime_new = w_prime_op.eval(session=self.sess) - rhoprec_prime_new = rhoprec_prime_op.eval(session=self.sess) - wcov_prime_new = wcov_prime_op.eval(session=self.sess) - vcov_prime_new = vcov_prime_op.eval(session=self.sess) - - # for convergence, we check w, rho, and sigma_v (since we - # use them for reconstruction/projection) - w_norm = tf.norm(tensor=w_prime_new - self.w_prime).eval( - session=self.sess) / (self.n*self.v*self.k) - # update (since we reuse wcov_prime in computing w_prime) - self.w_prime.load(w_prime_new, session=self.sess) - self.rhoprec_prime.load(rhoprec_prime_new, session=self.sess) - self.wcov_prime.load(wcov_prime_new, session=self.sess) - self.vcov_prime.load(vcov_prime_new, session=self.sess) - - loss_end_estep = loss_op.eval(session=self.sess) - logger.info("Iter %i, %s at estep end %f" % - (i, loss_name, loss_end_estep)) + self.estep_margw(X) + q_end_estep = self.Q_fun_margw(self.S, X) + logger.info(f"Iter {em_iter}, {loss_name} at estep end {q_end_estep}") + print(f"Iter {em_iter}, {loss_name} at estep end {q_end_estep}") # MSTEP - self.b.load(b_op.eval(session=self.sess), session=self.sess) - - rhoprec_new = rhoprec_op.eval(session=self.sess) - rhoprec_norm = tf.norm(tensor=rhoprec_new - self.rhoprec).eval( - session=self.sess) / self.n - self.rhoprec.load(rhoprec_new, session=self.sess) - - if self.s_constraint == "gaussian": - s_hat = s_op.eval(session=self.sess).T - elif self.s_constraint == "ortho": - if i == 0: - # initial guess it the least squares op - s_hat = solver.solve(problem, x=s_op.eval( - session=self.sess).T) - else: - s_hat = solver.solve(problem, x=self.S_trp.eval( - session=self.sess)) - - self.S_trp.load(s_hat, session=self.sess) - - old_sigma_v = self.space_cov.Sigma.eval(session=self.sess) - - if self.space_noise_cov_class is not CovIdentity: - sigma_v_opt.minimize(session=self.sess) - - sigv_norm = tf.norm(tensor=old_sigma_v - self.space_cov.Sigma).eval( - session=self.sess) / (self.v**2) - - if self.time_noise_cov_class is not CovIdentity: - sigma_t_opt.minimize(session=self.sess) - - if self.marg_cov_class is not CovIdentity: - 
sigma_w_opt.minimize(session=self.sess) - - loss_end_mstep = loss_op.eval(session=self.sess) - logger.info("Iter %i, %s at mstep end %f" % - (i, loss_name, loss_end_mstep)) - if loss_end_estep > loss_start: - logger.warn("Warning! estep did not improve loss!\ - Instead, worsened by %f" % - (loss_start-loss_end_estep)) - if loss_end_estep > loss_start: - logger.warn("Warning! mstep did not improve loss!\ - Instead, worsened by %f" % - (loss_end_estep-loss_end_mstep)) - - logger.info("Iter %i end, W norm %f, sigV norm %f,\ - rhoprec norm %f" % - (i, w_norm, sigv_norm, rhoprec_norm)) - - delQ = loss_end_mstep - loss_start - if np.max(np.r_[w_norm, sigv_norm, - rhoprec_norm, delQ]) <= convergence_tol: - converged = True - break - - if converged: - logger.info("Converged in %i iterations" % i) - else: - logger.warn("Not converged to tolerance!\ - Results may not be reliable") - self.w_ = self.w_prime.eval(session=self.sess) - self.s_ = self.S.eval(session=self.sess) - self.rho_ = 1/self.rhoprec.eval(session=self.sess) + self.mstep_margw(X) + + q_end_mstep = self.Q_fun_margw(self.S, X) + logger.info("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) + print("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) + assert q_end_estep >= q_start, "Q increased in E-step!" + assert q_end_mstep >= q_end_estep, "Q increased in M-step!" + + # converged = check_convergence() - self.final_loss_ = loss_op.eval(session=self.sess) - self.logp_ = self._make_logp_op().eval(session=self.sess) + # Convergence checks: tol on just delta-loss or + # we check w, rho, and sigma_v (since we + # use them for reconstruction/projection)? + + # if converged: + # logger.info("Converged in %i iterations" % i) + # else: + # logger.warn("Not converged to tolerance!\ + # Results may not be reliable") + self.w_ = self.w_prime.numpy() + self.s_ = self.S.numpy() + self.rho_ = 1/self.rhoprec.numpy() + + self.final_loss_ = q_end_mstep + self.logp_ = self.logp_margw(X) def _condition(self, x): s = np.linalg.svd(x, compute_uv=False) return np.max(s)/np.min(s) - def transform(self, X): - vprec = self.space_cov.Sigma_inv.eval(session=self.sess) - conditions = [self._condition((w.T @ vprec @ w)/r) - for (w, r) in zip(self.w_, self.rho_)] - logger.info(["Condition #s for transformation"] + conditions) - return [np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) - for w, x, r in zip(self.w_, X, self.rho_)] - - def transform_orthow(self, X): - # orthonormalize W - w_ortho = [w @ np.linalg.svd(w.T @ w)[0] / - np.sqrt(np.linalg.svd(w.T @ w)[1]) - for w in self.w_] - vprec = self.space_cov.Sigma_inv.eval(session=self.sess) - conditions = [self._condition((w.T @ vprec @ w)/r) - for (w, r) in zip(self.w_, self.rho_)] + def transform(self, X, ortho_w=False): + if ortho_w: + w_local = [w @ np.linalg.svd( + w.T @ w)[0] / np.sqrt(np.linalg.svd(w.T @ w)[1]) for w in self.w_] + else: + w_local = self.w_ + + vprec_w = [self.space_cov.solve(w).numpy( + ) / r for (w, r) in zip(w_local, self.rhoprec_)] + vprec_x = [self.space_cov.solve(x).numpy( + ) / r for (x, r) in zip(X, self.rhoprec_)] + conditions = [self._condition(w.T @ vw) + for (w, vw) in zip(w_local, self.vprec_w)] logger.info(["Condition #s for transformation"] + conditions) - return [np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) - for w, x, r in zip(self.w_, X, self.rho_)] + return [np.linalg.solve(w.T @ vw, w.T @ vx) for (w, vw, vx) in zip(w_local, vprec_w, vprec_x)] From 3f40de1ef3593a45f37a8d307c93105cb265e48a Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: 
Fri, 1 Jan 2021 10:50:29 -0800 Subject: [PATCH 75/84] notation consistency cleanups --- brainiak/matnormal/srm_margs.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/brainiak/matnormal/srm_margs.py b/brainiak/matnormal/srm_margs.py index f91e6d285..3a1786241 100644 --- a/brainiak/matnormal/srm_margs.py +++ b/brainiak/matnormal/srm_margs.py @@ -26,7 +26,7 @@ class MNSRM(BaseEstimator): """Probabilistic SRM, aka SRM with marginalization over S (and orthonormal W). This generalizes SRM (Chen et al. 2015) by allowing arbitrary kronecker-structured residual covariance. Inference is - performed by ECM algorithm. + performed by ECM algorithm. """ def __init__(self, n_features=5, time_noise_cov=CovIdentity, @@ -156,13 +156,13 @@ def fit(self, X, n_iter=10, y=None, w_cov=None, svd_init=True): X = tf.stack(X, name="X") if svd_init: - xsvd = [np.linalg.svd(x) for x in X] + xinit = [np.linalg.svd(x) for x in X] else: - xsvd = [np.linalg.svd(np.random.normal( + xinit = [np.linalg.svd(np.random.normal( size=(self.v, self.t))) for i in range(self.n)] - w_init = [sv[0][:, :self.k] for sv in xsvd] - s_init = np.average([sv[2][:self.k, :] for sv in xsvd], 0) + w_init = [sv[0][:, :self.k] for sv in xinit] + s_init = np.average([sv[2][:self.k, :] for sv in xinit], 0) # parameters self.b = tf.Variable(np.random.normal( @@ -179,7 +179,6 @@ def fit(self, X, n_iter=10, y=None, w_cov=None, svd_init=True): self.tcov_prime = tf.Variable(np.eye(self.t), name="wcov_prime") # Pymanopt setup - # now we fool pymanopt into thinking we prepicked # number of args even though we use varargs def wrapped_Q(*args): From 9a6283282f4a28fd3d94705440772649a169bdda Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 1 Jan 2021 10:51:02 -0800 Subject: [PATCH 76/84] MNSRM-MargS with all options (some combos don't work and/or don't make sense) --- brainiak/matnormal/srm_margw.py | 274 +++++++++++++++++++------------- 1 file changed, 164 insertions(+), 110 deletions(-) diff --git a/brainiak/matnormal/srm_margw.py b/brainiak/matnormal/srm_margw.py index 96f9137db..30bd81a1d 100644 --- a/brainiak/matnormal/srm_margw.py +++ b/brainiak/matnormal/srm_margw.py @@ -17,6 +17,23 @@ logger = logging.getLogger(__name__) +def assert_monotonicity(fun, rtol=1e-3): + """ + Check that the loss is monotonically decreasing + after called function. + tol > 0 allows for some slop due to numerics + """ + def wrapper(classref, *args, **kwargs): + loss_before = classref.lossfn(None) + print(f"loss before {fun} is {loss_before}") + res = fun(classref, *args, **kwargs) + loss_after = classref.lossfn(None) + print(f"loss after {fun} is {loss_after}") + assert loss_after-loss_before <= abs(loss_before*rtol), f"loss increased on {fun}" + return res + return wrapper + + class DPMNSRM(BaseEstimator): """Probabilistic SRM, aka SRM with marginalization over W (and optionally, orthonormal S). In contrast to SRM (Chen et al. 
2015), this estimates @@ -27,8 +44,8 @@ class DPMNSRM(BaseEstimator): def __init__(self, n_features=5, time_noise_cov=CovIdentity, space_noise_cov=CovIdentity, w_cov=CovIdentity, - s_constraint="gaussian", optMethod="L-BFGS-B", optCtrl={}, - improvement_tol=1e-5, algorithm="ECM"): + s_constraint="ortho", optMethod="L-BFGS-B", optCtrl={}, + improvement_tol=1e-5, algorithm="ECME"): self.k = n_features self.s_constraint = s_constraint @@ -36,11 +53,14 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.algorithm = algorithm if s_constraint == "ortho": logger.info("Orthonormal S selected") + if w_cov is CovIdentity: + raise RuntimeError("Orthonormal S with w_cov=I makes S not identifiable\ + (since it always appears as an inner product), please use another w_cov") elif s_constraint == "gaussian": logger.info("Gaussian S selected") if w_cov is not CovIdentity: - logger.warn("Gaussian S means w_cov can be I w.l.o.g., using\ - more general covs not recommended") + logger.warn(f"Gaussian S means w_cov can be I w.l.o.g., ignoring passed in\ + w_cov={w_cov}") else: raise RuntimeError( f"Unknown s_constraint! Expected 'ortho' or 'gaussian', got {s_constraint}!") @@ -55,8 +75,12 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.optCtrl, self.optMethod = optCtrl, optMethod - def logp_margw(self, X): - """ MatnormSRM Log-likelihood with marginal""" + def logp(self, X, S=None): + """ MatnormSRM marginal log-likelihood, integrating over W""" + + if S is None: + S = self.S + subj_space_covs = [CovScaleMixin(base_cov=self.space_cov, scale=1/self.rhoprec[j]) for j in range(self.n)] if self.marg_cov_class is CovIdentity: @@ -64,7 +88,7 @@ def logp_margw(self, X): input_tensor=[matnorm_logp_marginal_col(X[j], row_cov=subj_space_covs[j], col_cov=self.time_cov, - marg=self.S, + marg=S, marg_cov=CovIdentity(size=self.k)) for j in range(self.n)], name="lik_logp") @@ -74,7 +98,7 @@ def logp_margw(self, X): row_cov=subj_space_covs[j], col_cov=self.time_cov, marg=tf.matmul( - self.marg_cov.L, self.S), + self.marg_cov.L, S), marg_cov=CovIdentity(size=self.k)) for j in range(self.n)], name="lik_logp") else: @@ -84,17 +108,21 @@ def logp_margw(self, X): input_tensor=[matnorm_logp_marginal_col(X[j], row_cov=subj_space_covs[j], col_cov=self.time_cov, - marg=self.S, + marg=S, marg_cov=self.marg_cov) for j in range(self.n)], name="lik_logp") - def Q_fun_margw(self, Strp, X): + def Q_fun(self, X, S=None): + + if S is None: + S = self.S + # shorthands for readability - kt = self.k * self.t + kpt = self.k + self.t nv = self.n * self.v mean = X - self.b - tf.matmul(self.w_prime, - tf.tile(tf.expand_dims(self.S, 0), + tf.tile(tf.expand_dims(S, 0), [self.n, 1, 1])) # covs don't support batch ops (yet!) 
(TODO): @@ -115,33 +143,35 @@ def Q_fun_margw(self, Strp, X): if self.s_constraint == "gaussian": s_quad_form = - \ tf.linalg.trace(tf.matmul(self.time_cov.solve( - tf.transpose(a=self.S)), self.S)) - det_terms = -(self.v*self.n+self.k) * self.time_cov.logdet -\ - kt*self.n*self.space_cov.logdet +\ - kt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ + tf.transpose(a=S)), S)) + det_terms = -(nv+self.k) * self.time_cov.logdet -\ + kpt*self.n*self.space_cov.logdet +\ + kpt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ nv*self.marg_cov.logdet else: + # s_quad_form = -tf.linalg.trace(self.time_cov._prec) s_quad_form = 0 - det_terms = -(self.v*self.n)*self.time_cov.logdet -\ - kt*self.n*self.space_cov.logdet +\ - kt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ + det_terms = -nv*self.time_cov.logdet -\ + (self.n+self.t)*self.space_cov.logdet +\ + self.t*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ nv*self.marg_cov.logdet trace_prod = -tf.reduce_sum(input_tensor=self.rhoprec / self.rhoprec_prime) *\ tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) *\ (tf.linalg.trace(tf.matmul(self.wcov_prime, self.marg_cov._prec + - tf.matmul(self.S, self.time_cov.solve( - tf.transpose(a=self.S)))))) + tf.matmul(S, self.time_cov.solve( + tf.transpose(a=S)))))) return 0.5 * (det_terms + x_quad_form + w_quad_form + trace_prod + s_quad_form) - + + @assert_monotonicity def estep_margw(self, X): - wchol = tf.linalg.cholesky(self.marg_cov._prec + + wchol = tf.linalg.cholesky(tf.eye(self.k, dtype=tf.float64) + tf.matmul(self.S, self.time_cov.solve( tf.transpose(a=self.S)))) @@ -155,36 +185,43 @@ def estep_margw(self, X): # rhoprec doesn't change # vcov doesn't change - self.w_prime.assign(w_prime) - self.wcov_prime.assign(wcov_prime) - + self.w_prime.assign(w_prime, read_value=False) + self.wcov_prime.assign(wcov_prime, read_value=False) + @assert_monotonicity def mstep_b_margw(self, X): - return tf.expand_dims(tf.reduce_sum( - input_tensor=[self.time_cov.solve(tf.transpose(a=X[j] - - tf.matmul(self.w_prime[j], self.S))) - for j in range(self.n)], axis=1) / - tf.reduce_sum(input_tensor=self.time_cov._prec), -1) - - def mstep_S_nonortho(self, X): - wtw = tf.reduce_sum( - input_tensor=[tf.matmul(self.w_prime[j], - self.space_cov.solve( - self.w_prime[j]), - transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], axis=0) - - wtx = tf.reduce_sum( - input_tensor=[tf.matmul(self.w_prime[j], - self.space_cov.solve( - X[j]-self.b[j]), - transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], axis=0) - - return tf.linalg.solve(wtw + tf.reduce_sum(input_tensor=self.rhoprec_prime / self.rhoprec) * - tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) * - self.wcov_prime + tf.eye(self.k, dtype=tf.float64), wtx) + resids_transpose = [tf.transpose(X[j] - self.w_prime[j] @ self.S) for j in range(self.n)] + numerator = [tf.reduce_sum(tf.transpose(self.time_cov.solve(r)), axis=1) for r in resids_transpose] + denominator = tf.reduce_sum(self.time_cov._prec) + + self.b.assign(tf.stack([n/denominator for n in numerator])[...,None], read_value=False) + + @assert_monotonicity + def mstep_S(self, X): + if self.s_constraint == "gaussian": + wtw = tf.reduce_sum( + input_tensor=[tf.matmul(self.w_prime[j], + self.space_cov.solve( + self.w_prime[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], axis=0) + + wtx = tf.reduce_sum( + input_tensor=[tf.matmul(self.w_prime[j], + self.space_cov.solve( + X[j]-self.b[j]), + transpose_a=True) * 
+ self.rhoprec[j] for j in range(self.n)], axis=0) + + self.S.assign(tf.linalg.solve(wtw + tf.reduce_sum(input_tensor=self.rhoprec_prime / self.rhoprec) * + tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) * + self.wcov_prime + tf.eye(self.k, dtype=tf.float64), wtx), read_value=False) + + elif self.s_constraint == "ortho": + new_Strp = self.solver.solve(self.problem, x=self.S.numpy().T) + self.S.assign(new_Strp.T, read_value=False) + @assert_monotonicity def mstep_rhoprec_margw(self, X): mean = X - self.b -\ @@ -205,34 +242,19 @@ def mstep_rhoprec_margw(self, X): shared_term = (1/self.rhoprec_prime) *\ tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) *\ - tf.linalg.trace(tf.matmul(self.wcov_prime, - self.marg_cov._prec + - tf.matmul(self.S, - self.time_cov.solve( - tf.transpose(a=self.S))))) + (tf.linalg.trace(self.marg_cov.solve(self.wcov_prime)) + + tf.linalg.trace(self.S @ self.time_cov.solve(tf.transpose(self.S)))) + rho_hat_unscaled = mean_trace + w_trace + shared_term - return (self.v*(self.k+self.t)) / rho_hat_unscaled + self.rhoprec.assign((self.v*(self.k+self.t)) / rho_hat_unscaled, read_value=False) - def mstep_margw(self, X): - # closed form parts - self.b = self.mstep_b_margw(X) - self.rhoprec = self.mstep_rhoprec_margw(X) - - # optimization parts: - # Stiefel manifold for orthonormal S (if ortho_s) - if self.s_constraint == "ortho": - new_Strp = self.solver.solve(self.problem, x=self.S.numpy().T) - self.S.assign(new_Strp.T) - else: - # if it's not ortho, it's just least squares update - self.S.assign(self.mstep_S_nonortho(X)) - # L-BFGS for residual covs + @assert_monotonicity + def mstep_covs(self): for cov in [self.space_cov, self.time_cov, self.marg_cov]: if len(cov.get_optimize_vars()) > 0: - def lossfn(Q): return -self.Q_fun_margw(self.S, X) val_and_grad = make_val_and_grad( - lossfn, cov.get_optimize_vars()) + self.lossfn, cov.get_optimize_vars()) x0 = pack_trainable_vars(cov.get_optimize_vars()) @@ -240,7 +262,19 @@ def lossfn(Q): return -self.Q_fun_margw(self.S, X) fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, **self.optCtrl ) - assert opt_results.success, "L-BFGS for covariances failed!" 
+ assert opt_results.success, f"L-BFGS for covariances failed with message: {opt_results.message}" + + def mstep_margw(self, X): + # closed form parts + self.mstep_b_margw(X) + # self.mstep_rhoprec_margw(X) + + # optimization parts: + # Stiefel manifold for orthonormal S (if ortho_s) + self.mstep_S(X) + + # L-BFGS for residual covs + self.mstep_covs() def _init_vars(self, X, svd_init=False): self.n = len(X) @@ -272,7 +306,7 @@ def _init_vars(self, X, svd_init=False): self.wcov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") self.vcov_prime = tf.Variable(np.eye(self.v), name="vcov_prime") - def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): + def fit(self, X, max_iter=10, y=None, svd_init=False, rtol=1e-3, gtol=1e-7): """ find S marginalizing W @@ -286,62 +320,82 @@ def fit(self, X, max_iter=10, y=None, convergence_tol=1e-3): # in case we get a list, and/or int16s or float32s X = np.array(X).astype(np.float64) - self._init_vars(X) + self._init_vars(X, svd_init=svd_init) if self.algorithm == "ECME": - def loss(x): return -self.logp_margw(X) + self.lossfn = lambda theta: -self.logp(X) + _loss_pymanopt = lambda Strp: -self.logp(X, tf.transpose(Strp)) loss_name = "-Marginal Lik" elif self.algorithm == "ECM": - def loss(x): return -self.Q_fun_margw(X) + self.lossfn = lambda theta: -self.Q_fun(X) + _loss_pymanopt = lambda Strp: -self.Q_fun(X, tf.transpose(Strp)) loss_name = "-ELPD (Q)" - def wrapped_Q(Strp, X): - return -self.Q_fun_margw(Strp, X) - - lossfn_Q = TensorFlow(wrapped_Q) + loss_pymanopt = TensorFlow(_loss_pymanopt) s_trp_manifold = Stiefel(self.t, self.k) - self.solver = TrustRegions(logverbosity=0) - self.problem = Problem(manifold=s_trp_manifold, cost=lossfn_Q) - + self.solver = TrustRegions() + self.problem = Problem(manifold=s_trp_manifold, cost=loss_pymanopt) + + prevloss = self.lossfn(None) + converged = False for em_iter in range(max_iter): - q_start = self.Q_fun_margw(self.S, X) - logger.info(f"Iter {em_iter}, {loss_name} at start {q_start}") - print(f"Iter {em_iter}, {loss_name} at start {q_start}") + logger.info(f"Iter {em_iter}, {loss_name} at start {prevloss}") + # print(f"Iter {em_iter}, {loss_name} at start {q_start}") # ESTEP self.estep_margw(X) - q_end_estep = self.Q_fun_margw(self.S, X) - logger.info(f"Iter {em_iter}, {loss_name} at estep end {q_end_estep}") - print(f"Iter {em_iter}, {loss_name} at estep end {q_end_estep}") - + currloss = self.lossfn(None) + logger.info(f"Iter {em_iter}, {loss_name} at estep end {currloss}") + print(f"Iter {em_iter}, {loss_name} at estep end {currloss}") + assert currloss - prevloss <= 0.1 , f"{loss_name} increased in E-step!" + prevloss = currloss # MSTEP self.mstep_margw(X) - q_end_mstep = self.Q_fun_margw(self.S, X) - logger.info("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) - print("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) - assert q_end_estep >= q_start, "Q increased in E-step!" - assert q_end_mstep >= q_end_estep, "Q increased in M-step!" - - # converged = check_convergence() - - # Convergence checks: tol on just delta-loss or - # we check w, rho, and sigma_v (since we - # use them for reconstruction/projection)? 
- - # if converged: - # logger.info("Converged in %i iterations" % i) - # else: - # logger.warn("Not converged to tolerance!\ - # Results may not be reliable") + currloss = self.lossfn(None) + logger.info(f"Iter {em_iter}, {loss_name} at mstep end {currloss}") + print("Iter %i, Q at mstep end %f" % (em_iter, currloss)) + currloss = self.lossfn(None) + assert currloss - prevloss <= 0.1, f"{loss_name} increased in M-step!" + + if prevloss - currloss < abs(rtol * prevloss): + break + converged = True + converged_reason = "rtol" + elif self._loss_gradnorm() < gtol: + break + converged = True + converged_reason = "gtol" + + if converged: + logger.info(f"Converged in {em_iter} iterations with by metric {converged_reason}") + else: + logger.warn("Not converged to tolerance!\ + Results may not be reliable") self.w_ = self.w_prime.numpy() self.s_ = self.S.numpy() self.rho_ = 1/self.rhoprec.numpy() - self.final_loss_ = q_end_mstep - self.logp_ = self.logp_margw(X) + self.final_loss_ = self.lossfn(None) + self.logp_ = self.logp(X) + + def _loss_gradnorm(self): + + params = [self.S, self.rhoprec] +\ + self.space_cov.get_optimize_vars() +\ + self.time_cov.get_optimize_vars() +\ + self.marg_cov.get_optimize_vars() + if self.algorithm == "ECM": + # if ECME, marginal likelihood is independent + # of W sufficient statistic + params.append(self.w_prime) + + val_and_grad = make_val_and_grad(self.lossfn, params) + packed_params = pack_trainable_vars(params) + _, grad = val_and_grad(packed_params) + return np.linalg.norm(grad, np.inf) def _condition(self, x): s = np.linalg.svd(x, compute_uv=False) From fb552a78d1b472fb76153f59feb74704ff461d04 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 1 Jan 2021 10:51:46 -0800 Subject: [PATCH 77/84] remove logging calls that would indirectly lead to explicit inverses --- brainiak/matnormal/matnormal_likelihoods.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/brainiak/matnormal/matnormal_likelihoods.py b/brainiak/matnormal/matnormal_likelihoods.py index 55d3f3dff..47ce2b607 100644 --- a/brainiak/matnormal/matnormal_likelihoods.py +++ b/brainiak/matnormal/matnormal_likelihoods.py @@ -62,8 +62,6 @@ def solve_det_marginal(x, sigma, A, Q): logging.DEBUG, f"lemma_factor condition={lemma_cond}", ) - logging.log(logging.DEBUG, f"Q condition={_condition(Q._cov)}") - logging.log(logging.DEBUG, f"sigma condition={_condition(sigma._cov)}") logging.log( logging.DEBUG, f"sigma max={tf.reduce_max(input_tensor=A)}," + From dcf026ef06e6bd3ba0b06faa349eb9f6727888df Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 1 Jan 2021 10:52:44 -0800 Subject: [PATCH 78/84] full complement of tests (incl nonidentifiable ones) --- tests/matnormal/test_matnormal_srm.py | 71 ++++++++++++++++++++++----- 1 file changed, 58 insertions(+), 13 deletions(-) diff --git a/tests/matnormal/test_matnormal_srm.py b/tests/matnormal/test_matnormal_srm.py index f89d35c25..75749c5df 100644 --- a/tests/matnormal/test_matnormal_srm.py +++ b/tests/matnormal/test_matnormal_srm.py @@ -1,13 +1,17 @@ +import pytest from brainiak.matnormal.srm_margs import MNSRM +from brainiak.matnormal.srm_margw import DPMNSRM +from brainiak.matnormal.covs import CovIdentity, CovAR1, CovIsotropic, CovUnconstrainedCholesky import numpy as np +import itertools -from scipy.stats import pearsonr +from scipy.stats import pearsonr, wishart import tensorflow as tf - -def test_mnsrm_margs(): +@pytest.fixture +def mnsrm_fakedata(): np.random.seed(1) tf.random.set_seed(1) voxels = 10 @@ -15,6 +19,14 @@ def 
test_mnsrm_margs(): subjects = 2 features = 3 + def make_noise(noise_distr='iid', noise_scale=0.1): + if noise_distr == 'iid': + noise = noise_scale*np.random.random((voxels, samples)) + elif noise_distr == "unconstrained": + space_chol = np.linalg.cholesky(wishart.rvs(df=voxels+2, scale=np.eye(voxels))) + time_chol = np.linalg.cholesky(wishart.rvs(df=samples+2, scale=np.eye(samples))) + noise = noise_scale * space_chol @ np.random.random((voxels, samples)) @ time_chol + return noise # Create a Shared response S with K = 3 theta = np.linspace(-4 * np.pi, 4 * np.pi, samples) z = np.linspace(-2, 2, samples) @@ -30,13 +42,25 @@ def test_mnsrm_margs(): for subject in range(subjects): Q, R = np.linalg.qr(np.random.random((voxels, features))) W.append(Q) - X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples))) + X.append(Q.dot(S) + make_noise()) + + data = X, W, S + sizes = voxels, samples, features, subjects + return data, sizes + + +@pytest.mark.parametrize("svd_init", [True, False]) +def test_mnsrm_margs(mnsrm_fakedata, svd_init): + # Test that MNSRM-MargS + + data, sizes = mnsrm_fakedata + X, W, S = data + voxels, samples, features, subjects = sizes model = MNSRM(n_features=features) assert model, "Cannot instantiate MNSRM!" - # test that it works without svd init - model.fit(X, n_iter=5, svd_init=False) + model.fit(X, n_iter=5, svd_init=svd_init) assert model.s_.shape == (features, samples), "S wrong shape!" @@ -50,12 +74,32 @@ def test_mnsrm_margs(): corrs = [pearsonr(r.flatten(), x.flatten())[0] for r, x in zip(reconstructions, X)] for corr in corrs: - assert corr > 0.9, "Reconstruction with svd_init=False is bad! " - - model = MNSRM(n_features=features) - - # test that it works with svd init - model.fit(X, n_iter=5, svd_init=True) + assert corr > 0.9, f"Reconstruction with svd_init={svd_init} is bad!" + + +@pytest.mark.parametrize("svd_init,algo,s_constraint,space_cov,time_cov", + itertools.product([True, False], ["ECM", "ECME"], + ['gaussian','ortho'], [CovIdentity,CovIsotropic], + [CovIdentity, CovAR1])) +def test_mnsrm_margw(mnsrm_fakedata, svd_init, algo, s_constraint, + space_cov, time_cov): + """ DPMNSRM test + """ + + data, sizes = mnsrm_fakedata + X, W, S = data + voxels, samples, features, subjects = sizes + + if s_constraint == "ortho": + w_cov = CovUnconstrainedCholesky + else: + w_cov = CovIdentity + + model = DPMNSRM(n_features=features, + s_constraint=s_constraint, algorithm=algo, + time_noise_cov=time_cov, w_cov=w_cov, space_noise_cov=space_cov) + assert model, "Cannot instantiate DPMNSRM!" + model.fit(X, max_iter=10, svd_init=svd_init, rtol=0.01, gtol=1e-3) assert model.s_.shape == (features, samples), "S wrong shape!" @@ -69,4 +113,5 @@ def test_mnsrm_margs(): corrs = [pearsonr(r.flatten(), x.flatten())[0] for r, x in zip(reconstructions, X)] for corr in corrs: - assert corr > 0.9, "Reconstruction svd_init=True is bad! 
" + assert corr > 0.8, f"Reconstruction corr={corr}<0.8 (svd_init={svd_init} algo={algo} s_constraint={s_constraint} space_cov={space_cov} time_cov={time_cov})" + \ No newline at end of file From 6e12707e6011dd15b7eb2873b7199cf3e545007d Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 1 Jan 2021 10:54:37 -0800 Subject: [PATCH 79/84] remove srm-margs for now due to pymanopt dependency issue --- brainiak/matnormal/srm_margs.py | 251 -------------------------------- 1 file changed, 251 deletions(-) delete mode 100644 brainiak/matnormal/srm_margs.py diff --git a/brainiak/matnormal/srm_margs.py b/brainiak/matnormal/srm_margs.py deleted file mode 100644 index 3a1786241..000000000 --- a/brainiak/matnormal/srm_margs.py +++ /dev/null @@ -1,251 +0,0 @@ -import inspect -import logging -import warnings - -from brainiak.matnormal.covs import CovIdentity -from brainiak.matnormal.utils import make_val_and_grad, pack_trainable_vars - -import numpy as np - -from pymanopt import Problem -from pymanopt.function import TensorFlow -from pymanopt.manifolds import Product, Stiefel -from pymanopt.solvers import TrustRegions - -from scipy.optimize import minimize - -from sklearn.base import BaseEstimator - -import tensorflow as tf - - -logger = logging.getLogger(__name__) - - -class MNSRM(BaseEstimator): - """Probabilistic SRM, aka SRM with marginalization over S (and - orthonormal W). This generalizes SRM (Chen et al. 2015) by allowing - arbitrary kronecker-structured residual covariance. Inference is - performed by ECM algorithm. - """ - - def __init__(self, n_features=5, time_noise_cov=CovIdentity, - space_noise_cov=CovIdentity, s_cov=CovIdentity, - optMethod="L-BFGS-B", optCtrl={}): - - self.k = n_features - - self.time_noise_cov_class = time_noise_cov - self.space_noise_cov_class = space_noise_cov - self.marg_cov_class = s_cov - - self.optCtrl, self.optMethod = optCtrl, optMethod - - def Q_fun(self, W, X): - """ - Q function for ECM algorithm - """ - - mean = X - self.b - \ - tf.matmul(W, tf.tile(tf.expand_dims( - self.s_prime, 0), [self.n, 1, 1])) - - det_terms = -(self.v*self.n + self.k)*self.time_cov.logdet -\ - (self.t*self.n)*self.space_cov.logdet -\ - self.t*self.marg_cov.logdet +\ - (self.t*self.v) * \ - tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) - - # used twice below - trace_t_t = tf.linalg.trace(self.time_cov.solve(self.tcov_prime)) - - # covs don't support batch ops (yet!) (TODO): - x_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(self.time_cov.solve(tf.transpose(a=mean[j])), - self.space_cov.solve(mean[j]))*self.rhoprec[j] - for j in range(self.n)], axis=0)) - - w_quad_form = -tf.linalg.trace(tf.reduce_sum(input_tensor=[tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=W[j])), - self.space_cov.solve(W[j]))*self.rhoprec[j] - for j in range(self.n)], axis=0)) * trace_t_t - - s_quad_form = -tf.linalg.trace(tf.matmul(self.time_cov.solve(tf.transpose(a=self.s_prime)), - self.marg_cov.solve(self.s_prime))) - - sig_trace_prod = -trace_t_t * \ - tf.linalg.trace(self.marg_cov.solve(self.scov_prime)) - - return 0.5 * (det_terms + x_quad_form + s_quad_form + w_quad_form + sig_trace_prod) - - def estep(self, X): - """ - Compute expectation of the log posterior density (aka complete data log-likelihood) - for ECM. 
- """ - - tcov_prime = self.time_cov - Xmb = X - self.b - - sprec_chol = tf.linalg.cholesky(self.marg_cov._prec + tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose( - a=self.W[j]), self.space_cov.solve(self.W[j]))*self.rhoprec[j] for j in range(self.n)], axis=0)) - - wsig_x = tf.reduce_sum(input_tensor=[tf.matmul(tf.transpose( - a=self.W[j]), self.space_cov.solve(Xmb[j]))*self.rhoprec[j] for j in range(self.n)], axis=0) - - scov_prime = tf.linalg.cholesky_solve( - sprec_chol, tf.eye(self.k, dtype=tf.float64)) - - s_prime = tf.linalg.cholesky_solve(sprec_chol, wsig_x) - - return s_prime, scov_prime, tcov_prime._cov - - def mstep_b(self, X): - """ - Update b (intercept term) as part of M-step. - """ - - return tf.expand_dims(tf.reduce_sum(input_tensor=[self.time_cov.solve(tf.transpose(a=X[j] - tf.matmul(self.W[j], self.s_prime))) - for j in range(self.n)], axis=1) / - tf.reduce_sum(input_tensor=self.time_cov._prec), -1) - - def mstep_rhoprec(self, X): - """ - Update rho^-1 (subject-wise precision scalers) as part of M-step. - """ - - mean = X - self.b - \ - tf.matmul(self.W, tf.tile(tf.expand_dims( - self.s_prime, 0), [self.n, 1, 1])) - - mean_trace = tf.stack([tf.linalg.trace(tf.matmul(self.time_cov.solve(tf.transpose(a=mean[j])), - self.space_cov.solve(mean[j]))) for j in range(self.n)]) - - trace_t_t = tf.linalg.trace(self.time_cov.solve(self.tcov_prime)) - - w_trace = trace_t_t * tf.stack([tf.linalg.trace(tf.matmul(tf.matmul(self.scov_prime, tf.transpose(a=self.W[j])), - self.space_cov.solve(self.W[j]))) for j in range(self.n)]) - - rho_hat_unscaled = mean_trace + w_trace - - return (self.v*self.t) / rho_hat_unscaled - - def fit(self, X, n_iter=10, y=None, w_cov=None, svd_init=True): - """ - Fit SRM by ECM marginalizing over S. - - Parameters - ---------- - X: 2d array - Brain data matrix (voxels by TRs). Y in the math - n_iter: int, default=10 - Number of ECM iterations to run - y: None - Ignored (just here for sklearn API compatibility) - w_cov : CovBase, default = CovIdentity - Prior covariance of the columns of W. - svd_init : bool, default=True - If true, initialize to the W_i left singular vectors of - X_i and S to the average of the right singular vectors - over all subjects. If false, initialize to random orthonormal - matrices. 
- """ - - self.n = len(X) - - self.v, self.t = X[0].shape - - X = tf.stack(X, name="X") - - if svd_init: - xinit = [np.linalg.svd(x) for x in X] - else: - xinit = [np.linalg.svd(np.random.normal( - size=(self.v, self.t))) for i in range(self.n)] - - w_init = [sv[0][:, :self.k] for sv in xinit] - s_init = np.average([sv[2][:self.k, :] for sv in xinit], 0) - - # parameters - self.b = tf.Variable(np.random.normal( - size=(self.n, self.v, 1)), name="b") - self.rhoprec = tf.Variable(np.ones(self.n), name="rhoprec") - self.W = tf.Variable(tf.stack([_w for _w in w_init])) - self.space_cov = self.space_noise_cov_class(size=self.v) - self.time_cov = self.time_noise_cov_class(size=self.t) - self.marg_cov = self.time_noise_cov_class(size=self.k) - - # sufficient statistics - self.s_prime = tf.Variable(s_init, dtype=tf.float64, name="s_prime") - self.scov_prime = tf.Variable(np.eye(self.k), name="wcov_prime") - self.tcov_prime = tf.Variable(np.eye(self.t), name="wcov_prime") - - # Pymanopt setup - # now we fool pymanopt into thinking we prepicked - # number of args even though we use varargs - def wrapped_Q(*args): - return -self.Q_fun(args, X) - - sig = inspect.signature(wrapped_Q) - newparams = [inspect.Parameter( - f"w_{i}", inspect.Parameter.POSITIONAL_ONLY) for i in range(self.n)] - newsig = sig.replace(parameters=newparams) - wrapped_Q.__signature__ = newsig - lossfn_Q = TensorFlow(wrapped_Q) - - w_manifold = Product([Stiefel(self.v, self.k) for i in range(self.n)]) - solver = TrustRegions(logverbosity=0) - w_problem = Problem(manifold=w_manifold, cost=lossfn_Q) - - for em_iter in range(n_iter): - q_start = self.Q_fun(self.W, X) - logger.info("Iter %i, Q at start %f" % (em_iter, q_start)) - - # ESTEP - # compute all the terms with old vals - s_prime_new, scov_prime_new, _ = self.estep(X) - self.s_prime.assign(s_prime_new, read_value=False) - self.scov_prime.assign(scov_prime_new, read_value=False) - # don't assign tcov since it is not updated in margS SRM - - q_end_estep = self.Q_fun(self.W, X) - logger.info("Iter %i, Q at estep end %f" % (em_iter, q_end_estep)) - - # MSTEP - - # closed form parts - self.b = self.mstep_b(X) - self.rhoprec = self.mstep_rhoprec(X) - - # optimization parts: - # Stiefel manifold for orthonormal W - new_w = solver.solve( - w_problem, x=[self.W[i].numpy() for i in range(self.n)]) - - self.W.assign(new_w, read_value=False) - - # L-BFGS for residual covs - for cov in [self.space_cov, self.time_cov, self.marg_cov]: - if len(cov.get_optimize_vars()) > 0: - def lossfn(Q): return -self.Q_fun(self.W, X) - val_and_grad = make_val_and_grad( - lossfn, cov.get_optimize_vars()) - - x0 = pack_trainable_vars(cov.get_optimize_vars()) - - opt_results = minimize( - fun=val_and_grad, x0=x0, jac=True, method=self.optMethod, - **self.optCtrl - ) - assert opt_results.success, "L-BFGS for covariances failed!" - - q_end_mstep = self.Q_fun(self.W, X) - logger.info("Iter %i, Q at mstep end %f" % (em_iter, q_end_mstep)) - assert q_end_estep >= q_start, "Q increased in E-step!" - assert q_end_mstep >= q_end_estep, "Q increased in M-step!" 
- - self.w_ = [self.W[i].numpy() for i in range(self.n)] - self.s_ = self.s_prime.numpy() - self.rho_ = 1/self.rhoprec.numpy() - - def transform(self, X): - vprec = self.space_cov._prec.numpy() - return np.array([np.linalg.solve((w.T @ vprec @ w)/r, (w.T @ vprec @ x)/r) for w, x, r in zip(self.w_, X, self.rho_)]) From 1eb5e7d501ac04383397761ed6ad17fda04a676a Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Fri, 1 Jan 2021 10:55:13 -0800 Subject: [PATCH 80/84] rename --- brainiak/matnormal/{srm_margw.py => dpsrm.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename brainiak/matnormal/{srm_margw.py => dpsrm.py} (100%) diff --git a/brainiak/matnormal/srm_margw.py b/brainiak/matnormal/dpsrm.py similarity index 100% rename from brainiak/matnormal/srm_margw.py rename to brainiak/matnormal/dpsrm.py From 3a5d13af6a0fb940ea5c989050569204942ead61 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 27 Jan 2021 17:54:41 -0800 Subject: [PATCH 81/84] remove ortho-s for initial version --- brainiak/matnormal/dpsrm.py | 135 ++++++++++-------------------------- 1 file changed, 36 insertions(+), 99 deletions(-) diff --git a/brainiak/matnormal/dpsrm.py b/brainiak/matnormal/dpsrm.py index 30bd81a1d..9fad68aa4 100644 --- a/brainiak/matnormal/dpsrm.py +++ b/brainiak/matnormal/dpsrm.py @@ -1,7 +1,4 @@ import tensorflow as tf -from pymanopt import Problem -from pymanopt.manifolds import Stiefel -from pymanopt.solvers import TrustRegions from sklearn.base import BaseEstimator from brainiak.matnormal.covs import (CovIdentity, CovScaleMixin, @@ -11,7 +8,6 @@ matnorm_logp_marginal_col) from brainiak.matnormal.utils import pack_trainable_vars, make_val_and_grad import logging -from pymanopt.function import TensorFlow from scipy.optimize import minimize logger = logging.getLogger(__name__) @@ -43,27 +39,15 @@ class DPMNSRM(BaseEstimator): """ def __init__(self, n_features=5, time_noise_cov=CovIdentity, - space_noise_cov=CovIdentity, w_cov=CovIdentity, - s_constraint="ortho", optMethod="L-BFGS-B", optCtrl={}, + space_noise_cov=CovIdentity, + optMethod="L-BFGS-B", optCtrl={}, improvement_tol=1e-5, algorithm="ECME"): self.k = n_features - self.s_constraint = s_constraint + # self.s_constraint = s_constraint self.improvement_tol = improvement_tol self.algorithm = algorithm - if s_constraint == "ortho": - logger.info("Orthonormal S selected") - if w_cov is CovIdentity: - raise RuntimeError("Orthonormal S with w_cov=I makes S not identifiable\ - (since it always appears as an inner product), please use another w_cov") - elif s_constraint == "gaussian": - logger.info("Gaussian S selected") - if w_cov is not CovIdentity: - logger.warn(f"Gaussian S means w_cov can be I w.l.o.g., ignoring passed in\ - w_cov={w_cov}") - else: - raise RuntimeError( - f"Unknown s_constraint! 
Expected 'ortho' or 'gaussian', got {s_constraint}!") + self.marg_cov_class = CovIdentity if algorithm not in ["ECM", "ECME"]: raise RuntimeError( @@ -71,7 +55,6 @@ def __init__(self, n_features=5, time_noise_cov=CovIdentity, self.time_noise_cov_class = time_noise_cov self.space_noise_cov_class = space_noise_cov - self.marg_cov_class = w_cov self.optCtrl, self.optMethod = optCtrl, optMethod @@ -83,34 +66,13 @@ def logp(self, X, S=None): subj_space_covs = [CovScaleMixin(base_cov=self.space_cov, scale=1/self.rhoprec[j]) for j in range(self.n)] - if self.marg_cov_class is CovIdentity: - return tf.reduce_sum( - input_tensor=[matnorm_logp_marginal_col(X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=S, - marg_cov=CovIdentity(size=self.k)) - for j in range(self.n)], name="lik_logp") - - elif self.marg_cov_class is CovUnconstrainedCholesky: - return tf.reduce_sum( - input_tensor=[matnorm_logp_marginal_col(X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=tf.matmul( - self.marg_cov.L, S), - marg_cov=CovIdentity(size=self.k)) - for j in range(self.n)], name="lik_logp") - else: - logger.warn("ECME with cov that is not identity or unconstrained may\ - yield numerical instabilities! Use ECM for now.") - return tf.reduce_sum( - input_tensor=[matnorm_logp_marginal_col(X[j], - row_cov=subj_space_covs[j], - col_cov=self.time_cov, - marg=S, - marg_cov=self.marg_cov) - for j in range(self.n)], name="lik_logp") + return tf.reduce_sum( + input_tensor=[matnorm_logp_marginal_col(X[j], + row_cov=subj_space_covs[j], + col_cov=self.time_cov, + marg=S, + marg_cov=CovIdentity(size=self.k)) + for j in range(self.n)], name="lik_logp") def Q_fun(self, X, S=None): @@ -140,21 +102,13 @@ def Q_fun(self, X, S=None): self.rhoprec[j] for j in range(self.n)], axis=0)) - if self.s_constraint == "gaussian": - s_quad_form = - \ - tf.linalg.trace(tf.matmul(self.time_cov.solve( - tf.transpose(a=S)), S)) - det_terms = -(nv+self.k) * self.time_cov.logdet -\ - kpt*self.n*self.space_cov.logdet +\ - kpt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ - nv*self.marg_cov.logdet - else: - # s_quad_form = -tf.linalg.trace(self.time_cov._prec) - s_quad_form = 0 - det_terms = -nv*self.time_cov.logdet -\ - (self.n+self.t)*self.space_cov.logdet +\ - self.t*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ - nv*self.marg_cov.logdet + s_quad_form = - \ + tf.linalg.trace(tf.matmul(self.time_cov.solve( + tf.transpose(a=S)), S)) + det_terms = -(nv+self.k) * self.time_cov.logdet -\ + kpt*self.n*self.space_cov.logdet +\ + kpt*self.v*tf.reduce_sum(input_tensor=tf.math.log(self.rhoprec)) -\ + nv*self.marg_cov.logdet trace_prod = -tf.reduce_sum(input_tensor=self.rhoprec / self.rhoprec_prime) *\ tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) *\ @@ -198,28 +152,23 @@ def mstep_b_margw(self, X): @assert_monotonicity def mstep_S(self, X): - if self.s_constraint == "gaussian": - wtw = tf.reduce_sum( - input_tensor=[tf.matmul(self.w_prime[j], - self.space_cov.solve( - self.w_prime[j]), - transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], axis=0) - - wtx = tf.reduce_sum( - input_tensor=[tf.matmul(self.w_prime[j], - self.space_cov.solve( - X[j]-self.b[j]), - transpose_a=True) * - self.rhoprec[j] for j in range(self.n)], axis=0) - - self.S.assign(tf.linalg.solve(wtw + tf.reduce_sum(input_tensor=self.rhoprec_prime / self.rhoprec) * - tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) * - self.wcov_prime + tf.eye(self.k, dtype=tf.float64), wtx), read_value=False) - - elif 
self.s_constraint == "ortho": - new_Strp = self.solver.solve(self.problem, x=self.S.numpy().T) - self.S.assign(new_Strp.T, read_value=False) + wtw = tf.reduce_sum( + input_tensor=[tf.matmul(self.w_prime[j], + self.space_cov.solve( + self.w_prime[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], axis=0) + + wtx = tf.reduce_sum( + input_tensor=[tf.matmul(self.w_prime[j], + self.space_cov.solve( + X[j]-self.b[j]), + transpose_a=True) * + self.rhoprec[j] for j in range(self.n)], axis=0) + + self.S.assign(tf.linalg.solve(wtw + tf.reduce_sum(input_tensor=self.rhoprec_prime / self.rhoprec) * + tf.linalg.trace(self.space_cov.solve(self.vcov_prime)) * + self.wcov_prime + tf.eye(self.k, dtype=tf.float64), wtx), read_value=False) @assert_monotonicity def mstep_rhoprec_margw(self, X): @@ -267,10 +216,7 @@ def mstep_covs(self): def mstep_margw(self, X): # closed form parts self.mstep_b_margw(X) - # self.mstep_rhoprec_margw(X) - - # optimization parts: - # Stiefel manifold for orthonormal S (if ortho_s) + self.mstep_rhoprec_margw(X) self.mstep_S(X) # L-BFGS for residual covs @@ -324,18 +270,11 @@ def fit(self, X, max_iter=10, y=None, svd_init=False, rtol=1e-3, gtol=1e-7): if self.algorithm == "ECME": self.lossfn = lambda theta: -self.logp(X) - _loss_pymanopt = lambda Strp: -self.logp(X, tf.transpose(Strp)) loss_name = "-Marginal Lik" elif self.algorithm == "ECM": self.lossfn = lambda theta: -self.Q_fun(X) - _loss_pymanopt = lambda Strp: -self.Q_fun(X, tf.transpose(Strp)) loss_name = "-ELPD (Q)" - loss_pymanopt = TensorFlow(_loss_pymanopt) - - s_trp_manifold = Stiefel(self.t, self.k) - self.solver = TrustRegions() - self.problem = Problem(manifold=s_trp_manifold, cost=loss_pymanopt) prevloss = self.lossfn(None) converged = False @@ -348,7 +287,6 @@ def fit(self, X, max_iter=10, y=None, svd_init=False, rtol=1e-3, gtol=1e-7): self.estep_margw(X) currloss = self.lossfn(None) logger.info(f"Iter {em_iter}, {loss_name} at estep end {currloss}") - print(f"Iter {em_iter}, {loss_name} at estep end {currloss}") assert currloss - prevloss <= 0.1 , f"{loss_name} increased in E-step!" prevloss = currloss # MSTEP @@ -356,7 +294,6 @@ def fit(self, X, max_iter=10, y=None, svd_init=False, rtol=1e-3, gtol=1e-7): currloss = self.lossfn(None) logger.info(f"Iter {em_iter}, {loss_name} at mstep end {currloss}") - print("Iter %i, Q at mstep end %f" % (em_iter, currloss)) currloss = self.lossfn(None) assert currloss - prevloss <= 0.1, f"{loss_name} increased in M-step!" 
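Note on the fit() loop introduced in this patch: it follows a standard ECM/ECME pattern, alternating E- and M-steps, asserting that the working loss never increases beyond a small numerical tolerance, and stopping on either a relative change in the loss (rtol) or an infinity-norm bound on its gradient (gtol). A minimal sketch of that control flow is below; the estep/mstep/lossfn/gradnorm callables are illustrative placeholders standing in for the class methods, not part of the package API.

import numpy as np

def em_fit(estep, mstep, lossfn, gradnorm, max_iter=10, rtol=1e-3, gtol=1e-7):
    # estep/mstep update parameters in place; lossfn() returns the current
    # negative objective; gradnorm() returns the inf-norm of its gradient.
    prevloss = lossfn()
    converged, reason = False, None
    for it in range(max_iter):
        loss_start = prevloss

        estep()
        loss_e = lossfn()
        assert loss_e - loss_start <= abs(rtol * loss_start), "loss increased in E-step"

        mstep()
        loss_m = lossfn()
        assert loss_m - loss_e <= abs(rtol * loss_e), "loss increased in M-step"

        # set the flags *before* breaking so the caller can report why we stopped
        if loss_start - loss_m < abs(rtol * loss_start):
            converged, reason = True, "rtol"
            break
        if gradnorm() < gtol:
            converged, reason = True, "gtol"
            break
        prevloss = loss_m
    return converged, reason, it

Setting converged/reason before the break matters: if the flags are only assigned after an unconditional break they are never reached, and the post-loop logging cannot distinguish convergence from simply hitting max_iter.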
From dc476be04e88454a07bcc5aa0cbb416b156df26e Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 27 Jan 2021 17:55:18 -0800 Subject: [PATCH 82/84] remove ortho-s and marg-s for now --- tests/matnormal/test_matnormal_srm.py | 62 +++++---------------------- 1 file changed, 11 insertions(+), 51 deletions(-) diff --git a/tests/matnormal/test_matnormal_srm.py b/tests/matnormal/test_matnormal_srm.py index 75749c5df..c541fac38 100644 --- a/tests/matnormal/test_matnormal_srm.py +++ b/tests/matnormal/test_matnormal_srm.py @@ -1,6 +1,5 @@ import pytest -from brainiak.matnormal.srm_margs import MNSRM -from brainiak.matnormal.srm_margw import DPMNSRM +from brainiak.matnormal.dpsrm import DPMNSRM from brainiak.matnormal.covs import CovIdentity, CovAR1, CovIsotropic, CovUnconstrainedCholesky import numpy as np @@ -19,14 +18,6 @@ def mnsrm_fakedata(): subjects = 2 features = 3 - def make_noise(noise_distr='iid', noise_scale=0.1): - if noise_distr == 'iid': - noise = noise_scale*np.random.random((voxels, samples)) - elif noise_distr == "unconstrained": - space_chol = np.linalg.cholesky(wishart.rvs(df=voxels+2, scale=np.eye(voxels))) - time_chol = np.linalg.cholesky(wishart.rvs(df=samples+2, scale=np.eye(samples))) - noise = noise_scale * space_chol @ np.random.random((voxels, samples)) @ time_chol - return noise # Create a Shared response S with K = 3 theta = np.linspace(-4 * np.pi, 4 * np.pi, samples) z = np.linspace(-2, 2, samples) @@ -35,6 +26,8 @@ def make_noise(noise_distr='iid', noise_scale=0.1): y = r * np.cos(theta) S = np.vstack((x, y, z)) + + rho = (0.1+np.random.normal(subjects)) ** 2 X = [] W = [] @@ -42,46 +35,19 @@ def make_noise(noise_distr='iid', noise_scale=0.1): for subject in range(subjects): Q, R = np.linalg.qr(np.random.random((voxels, features))) W.append(Q) - X.append(Q.dot(S) + make_noise()) + X.append(Q.dot(S) + rho*np.random.random((voxels, samples))) data = X, W, S sizes = voxels, samples, features, subjects return data, sizes -@pytest.mark.parametrize("svd_init", [True, False]) -def test_mnsrm_margs(mnsrm_fakedata, svd_init): - # Test that MNSRM-MargS - - data, sizes = mnsrm_fakedata - X, W, S = data - voxels, samples, features, subjects = sizes - - model = MNSRM(n_features=features) - assert model, "Cannot instantiate MNSRM!" - - model.fit(X, n_iter=5, svd_init=svd_init) - - assert model.s_.shape == (features, samples), "S wrong shape!" - - for i in range(subjects): - assert model.w_[i].shape == (voxels, features), f"W[{i}] wrong shape!" - - assert model.rho_.shape[0] == subjects, "rho wrong shape!" - - # check that reconstruction isn't terrible - reconstructions = [model.w_[i] @ model.s_ for i in range(subjects)] - corrs = [pearsonr(r.flatten(), x.flatten())[0] - for r, x in zip(reconstructions, X)] - for corr in corrs: - assert corr > 0.9, f"Reconstruction with svd_init={svd_init} is bad!" 
- -@pytest.mark.parametrize("svd_init,algo,s_constraint,space_cov,time_cov", +@pytest.mark.parametrize("svd_init,algo,space_cov,time_cov", itertools.product([True, False], ["ECM", "ECME"], - ['gaussian','ortho'], [CovIdentity,CovIsotropic], - [CovIdentity, CovAR1])) -def test_mnsrm_margw(mnsrm_fakedata, svd_init, algo, s_constraint, + [CovIdentity], + [CovIdentity])) +def test_mnsrm_margw(mnsrm_fakedata, svd_init, algo, space_cov, time_cov): """ DPMNSRM test """ @@ -90,14 +56,8 @@ def test_mnsrm_margw(mnsrm_fakedata, svd_init, algo, s_constraint, X, W, S = data voxels, samples, features, subjects = sizes - if s_constraint == "ortho": - w_cov = CovUnconstrainedCholesky - else: - w_cov = CovIdentity - - model = DPMNSRM(n_features=features, - s_constraint=s_constraint, algorithm=algo, - time_noise_cov=time_cov, w_cov=w_cov, space_noise_cov=space_cov) + model = DPMNSRM(n_features=features,algorithm=algo, + time_noise_cov=time_cov, space_noise_cov=space_cov) assert model, "Cannot instantiate DPMNSRM!" model.fit(X, max_iter=10, svd_init=svd_init, rtol=0.01, gtol=1e-3) @@ -113,5 +73,5 @@ def test_mnsrm_margw(mnsrm_fakedata, svd_init, algo, s_constraint, corrs = [pearsonr(r.flatten(), x.flatten())[0] for r, x in zip(reconstructions, X)] for corr in corrs: - assert corr > 0.8, f"Reconstruction corr={corr}<0.8 (svd_init={svd_init} algo={algo} s_constraint={s_constraint} space_cov={space_cov} time_cov={time_cov})" + assert corr > 0.8, f"Reconstruction corr={corr}<0.8 (svd_init={svd_init} algo={algo} space_cov={space_cov} time_cov={time_cov})" \ No newline at end of file From ceb3b9f0ac3f560f21e739d43ebe659021e439ec Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 27 Jan 2021 18:06:00 -0800 Subject: [PATCH 83/84] remove prints --- brainiak/matnormal/dpsrm.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/brainiak/matnormal/dpsrm.py b/brainiak/matnormal/dpsrm.py index 9fad68aa4..c1a53a8a5 100644 --- a/brainiak/matnormal/dpsrm.py +++ b/brainiak/matnormal/dpsrm.py @@ -21,10 +21,8 @@ def assert_monotonicity(fun, rtol=1e-3): """ def wrapper(classref, *args, **kwargs): loss_before = classref.lossfn(None) - print(f"loss before {fun} is {loss_before}") res = fun(classref, *args, **kwargs) loss_after = classref.lossfn(None) - print(f"loss after {fun} is {loss_after}") assert loss_after-loss_before <= abs(loss_before*rtol), f"loss increased on {fun}" return res return wrapper From 58a902d91c9835f48a42811f0bdbde807b155e15 Mon Sep 17 00:00:00 2001 From: Michael Shvartsman Date: Wed, 27 Jan 2021 18:06:13 -0800 Subject: [PATCH 84/84] correct size and noise --- tests/matnormal/test_matnormal_srm.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/matnormal/test_matnormal_srm.py b/tests/matnormal/test_matnormal_srm.py index c541fac38..18ca0dc45 100644 --- a/tests/matnormal/test_matnormal_srm.py +++ b/tests/matnormal/test_matnormal_srm.py @@ -13,8 +13,8 @@ def mnsrm_fakedata(): np.random.seed(1) tf.random.set_seed(1) - voxels = 10 - samples = 50 + voxels = 100 + samples = 500 subjects = 2 features = 3 @@ -27,7 +27,7 @@ def mnsrm_fakedata(): S = np.vstack((x, y, z)) - rho = (0.1+np.random.normal(subjects)) ** 2 + rho = (0.1*np.random.normal(subjects)) ** 2 X = [] W = [] @@ -59,7 +59,7 @@ def test_mnsrm_margw(mnsrm_fakedata, svd_init, algo, model = DPMNSRM(n_features=features,algorithm=algo, time_noise_cov=time_cov, space_noise_cov=space_cov) assert model, "Cannot instantiate DPMNSRM!" 
- model.fit(X, max_iter=10, svd_init=svd_init, rtol=0.01, gtol=1e-3) + model.fit(X, max_iter=20, svd_init=svd_init, rtol=1e-5, gtol=1e-7) assert model.s_.shape == (features, samples), "S wrong shape!"
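For orientation, a minimal end-to-end usage sketch of DPMNSRM as it stands after these patches. The synthetic data below mirrors the test fixture (orthonormal per-subject maps times a shared response plus noise), but the specific sizes, seed, and noise level are illustrative assumptions rather than part of the test suite.

import numpy as np
from brainiak.matnormal.dpsrm import DPMNSRM
from brainiak.matnormal.covs import CovIdentity

# Synthetic multi-subject data: one (voxels x TRs) array per subject.
voxels, samples, subjects, features = 30, 60, 2, 3
rng = np.random.default_rng(0)
S = rng.standard_normal((features, samples))
X = []
for _ in range(subjects):
    W, _ = np.linalg.qr(rng.standard_normal((voxels, features)))
    X.append(W @ S + 0.1 * rng.standard_normal((voxels, samples)))

# ECME optimizes the W-marginalized likelihood directly; ECM optimizes Q.
model = DPMNSRM(n_features=features, algorithm="ECME",
                time_noise_cov=CovIdentity, space_noise_cov=CovIdentity)
model.fit(X, max_iter=20, svd_init=True, rtol=1e-5, gtol=1e-7)

print(model.s_.shape)      # (features, samples): shared response
print(model.w_[0].shape)   # (voxels, features): map for subject 0
print(model.rho_.shape)    # (subjects,): per-subject noise scales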