diff --git a/brainiak/reprsimil/brsa.py b/brainiak/reprsimil/brsa.py
index 18af80adf..e51d591e4 100755
--- a/brainiak/reprsimil/brsa.py
+++ b/brainiak/reprsimil/brsa.py
@@ -497,7 +497,7 @@ def _prepare_data_XYX0(self, X, Y, X0, D, F, run_TRs, no_DC=False):
         if not np.any(np.isclose(res0[1], 0)):
             # No columns in X0 can be explained by the
             # baseline regressors. So we insert them.
-            X0 = np.concatenate(X_base, X0, axis=1)
+            X0 = np.concatenate((X_base, X0), axis=1)
         else:
             logger.warning('Provided regressors for non-interesting '
                            'time series already include baseline. '
@@ -506,7 +506,7 @@
         # If a set of regressors for non-interested signals is not
         # provided, then we simply include one baseline for each run.
         X0 = X_base
-        logger.info('You did not provide time seres of no interest '
+        logger.info('You did not provide time series of no interest '
                     'such as DC component. One trivial regressor of'
                     ' DC component is included for further modeling.'
                     ' The final covariance matrix won''t '
@@ -593,6 +593,7 @@ def _calc_LL(self, rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY, X0TAX0, SNR2,
         # in addition to a few other terms.
         LAMBDA_i = LTXTAcorrXL * SNR2[:, None, None] + np.eye(rank)
         # dimension: space*rank*rank
+        LAMBDA = np.linalg.solve(LAMBDA_i, np.identity(rank)[None, :, :])
         # dimension: space*rank*rank
         # LAMBDA is essentially the inverse covariance matrix of the
@@ -1011,6 +1012,29 @@
             X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
                 X0TY, X0TDY, X0TFY, X0, n_base = self._prepare_data_XYX0(
                     X, Y, X0, D, F, run_TRs, no_DC=True)
+
+            # fit U, the covariance matrix, together with AR(1) param
+            param0_fitU[idx_param_fitU['Cholesky']] = \
+                current_vec_U_chlsk_l
+            param0_fitU[idx_param_fitU['a1']] = current_a1
+            res_fitU = scipy.optimize.minimize(
+                self._loglike_AR1_diagV_fitU, param0_fitU,
+                args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
+                      XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
+                      XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
+                      current_logSNR2, l_idx, n_C,
+                      n_T, n_V, n_run, n_base, idx_param_fitU, rank),
+                method=self.optimizer, jac=True, tol=tol,
+                options={'xtol': tol, 'disp': self.verbose,
+                         'maxiter': 4})
+            current_vec_U_chlsk_l = \
+                res_fitU.x[idx_param_fitU['Cholesky']]
+            current_a1 = res_fitU.x[idx_param_fitU['a1']]
+            norm_fitUchange = np.linalg.norm(res_fitU.x - param0_fitU)
+            logger.debug('norm of parameter change after fitting U: '
+                         '{}'.format(norm_fitUchange))
+            param0_fitU = res_fitU.x.copy()
+
             # fit V, reflected in the log(SNR^2) of each voxel
             rho1 = np.arctan(current_a1) * 2 / np.pi
             L[l_idx] = current_vec_U_chlsk_l
@@ -1043,7 +1067,8 @@
             norm_fitVchange = np.linalg.norm(res_fitV.x - param0_fitV)
             logger.debug('norm of parameter change after fitting V: '
                          '{}'.format(norm_fitVchange))
-            logger.debug('E[log(SNR2)^2]:'.format(np.mean(current_logSNR2**2)))
+            logger.debug('E[log(SNR2)^2]: {}'.format(
+                np.mean(current_logSNR2**2)))
             # The lines below are for debugging purpose.
             # If any voxel's log(SNR^2) gets to non-finite number,
@@ -1057,28 +1082,6 @@ def _fit_diagV_noGP(
             param0_fitV = res_fitV.x.copy()
-            # fit U, the covariance matrix, together with AR(1) param
-            param0_fitU[idx_param_fitU['Cholesky']] = \
-                current_vec_U_chlsk_l
-            param0_fitU[idx_param_fitU['a1']] = current_a1
-            res_fitU = scipy.optimize.minimize(
-                self._loglike_AR1_diagV_fitU, param0_fitU,
-                args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
-                      XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
-                      XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
-                      current_logSNR2, l_idx, n_C,
-                      n_T, n_V, n_run, n_base, idx_param_fitU, rank),
-                method=self.optimizer, jac=True, tol=tol,
-                options={'xtol': tol, 'disp': self.verbose,
-                         'maxiter': 3})
-            current_vec_U_chlsk_l = \
-                res_fitU.x[idx_param_fitU['Cholesky']]
-            current_a1 = res_fitU.x[idx_param_fitU['a1']]
-            norm_fitUchange = np.linalg.norm(res_fitU.x - param0_fitU)
-            logger.debug('norm of parameter change after fitting U: '
-                         '{}'.format(norm_fitUchange))
-            param0_fitU = res_fitU.x.copy()
-
             # Re-estimating X0 from residuals
             current_SNR2 = np.exp(current_logSNR2)
             if self.auto_nuisance:
@@ -1135,6 +1138,34 @@ def _fit_diagV_GP(
             X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
                 X0TY, X0TDY, X0TFY, X0, n_base = self._prepare_data_XYX0(
                     X, Y, X0, D, F, run_TRs, no_DC=True)
+
+            # fit U
+
+            param0_fitU[idx_param_fitU['Cholesky']] = \
+                current_vec_U_chlsk_l
+            param0_fitU[idx_param_fitU['a1']] = current_a1
+
+            res_fitU = scipy.optimize.minimize(
+                self._loglike_AR1_diagV_fitU, param0_fitU,
+                args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
+                      XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
+                      XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
+                      current_logSNR2, l_idx, n_C, n_T, n_V,
+                      n_run, n_base, idx_param_fitU, rank),
+                method=self.optimizer, jac=True,
+                tol=tol,
+                options={'xtol': tol,
+                         'disp': self.verbose, 'maxiter': 6})
+            current_vec_U_chlsk_l = \
+                res_fitU.x[idx_param_fitU['Cholesky']]
+            current_a1 = res_fitU.x[idx_param_fitU['a1']]
+            L[l_idx] = current_vec_U_chlsk_l
+            fitUchange = res_fitU.x - param0_fitU
+            norm_fitUchange = np.linalg.norm(fitUchange)
+            logger.debug('norm of parameter change after fitting U: '
+                         '{}'.format(norm_fitUchange))
+            param0_fitU = res_fitU.x.copy()
+
             # fit V
             rho1 = np.arctan(current_a1) * 2 / np.pi
             X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
@@ -1176,33 +1207,6 @@
             logger.debug('E[log(SNR2)^2]: {}'.format(
                 np.mean(current_logSNR2**2)))

-            # fit U
-
-            param0_fitU[idx_param_fitU['Cholesky']] = \
-                current_vec_U_chlsk_l
-            param0_fitU[idx_param_fitU['a1']] = current_a1
-
-            res_fitU = scipy.optimize.minimize(
-                self._loglike_AR1_diagV_fitU, param0_fitU,
-                args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
-                      XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
-                      XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
-                      current_logSNR2, l_idx, n_C, n_T, n_V,
-                      n_run, n_base, idx_param_fitU, rank),
-                method=self.optimizer, jac=True,
-                tol=tol,
-                options={'xtol': tol,
-                         'disp': self.verbose, 'maxiter': 6})
-            current_vec_U_chlsk_l = \
-                res_fitU.x[idx_param_fitU['Cholesky']]
-            current_a1 = res_fitU.x[idx_param_fitU['a1']]
-            L[l_idx] = current_vec_U_chlsk_l
-            fitUchange = res_fitU.x - param0_fitU
-            norm_fitUchange = np.linalg.norm(fitUchange)
-            logger.debug('norm of parameter change after fitting U: '
-                         '{}'.format(norm_fitUchange))
-            param0_fitU = res_fitU.x.copy()
-
             # Re-estimating X0 from residuals
             current_SNR2 = np.exp(current_logSNR2)
             if self.auto_nuisance:
@@ -1291,16 +1295,18 @@ def _loglike_AR1_diagV_fitU(self, param, XTX, XTDX, XTFX, YTY_diag,
                                        L, rho1, n_V, n_base)
        # Only starting from this point, SNR2 is involved
+
        LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
-            = self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY, X0TAX0,
-                            SNR2, n_V, n_T, n_run, rank, n_base)
+            = self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
+                            X0TAX0, SNR2, n_V, n_T, n_run, rank, n_base)
        if not np.isfinite(LL):
-            logger.debug('NaN detected!')
-            logger.debug(sigma2)
-            logger.debug(YTAcorrY)
-            logger.debug(LTXTAcorrY)
-            logger.debug(YTAcorrXL_LAMBDA)
-            logger.debug(SNR2)
+            logger.warning('NaN detected!')
+            logger.warning('LL: {}'.format(LL))
+            logger.warning('sigma2: {}'.format(sigma2))
+            logger.warning('YTAcorrY: {}'.format(YTAcorrY))
+            logger.warning('LTXTAcorrY: {}'.format(LTXTAcorrY))
+            logger.warning('YTAcorrXL_LAMBDA: {}'.format(YTAcorrXL_LAMBDA))
+            logger.warning('SNR2: {}'.format(SNR2))
        YTAcorrXL_LAMBDA_LT = np.dot(YTAcorrXL_LAMBDA, L.T)
        # dimension: space*feature (feature can be larger than rank)
diff --git a/brainiak/utils/utils.py b/brainiak/utils/utils.py
index 2c206aae7..428f8d042 100644
--- a/brainiak/utils/utils.py
+++ b/brainiak/utils/utils.py
@@ -329,7 +329,7 @@ def read_afni(self, fname):
                     int(split_by_at[1])
                 curr_idx += n_this_cond
             elif len(split_by_at) == 1 and \
-                    not re.search('..', split_by_at[0]):
+                    not re.search(r'\..', split_by_at[0]):
                 # Just a number, and not the type like '1..4'
                 self.column_types[curr_idx] = int(split_by_at[0])
                 curr_idx += 1
diff --git a/examples/reprsimil/brsa_representational_similarity_estimate_example.ipynb b/examples/reprsimil/brsa_representational_similarity_estimate_example.ipynb
index 8ea25bd4c..d7faea336 100644
--- a/examples/reprsimil/brsa_representational_similarity_estimate_example.ipynb
+++ b/examples/reprsimil/brsa_representational_similarity_estimate_example.ipynb
@@ -6,8 +6,7 @@
    "collapsed": false
   },
   "source": [
-    "# This demo shows how to use the Bayesian Representational Similarity Analysis method in brainiak with a simulated dataset.\n",
-    "Questions can be directed to mcai [ at ] princeton [ dot ] edu"
+    "# This demo shows how to use the Bayesian Representational Similarity Analysis method in brainiak with a simulated dataset."
   ]
  },
  {
@@ -153,7 +152,6 @@
    "rho1 = np.random.rand(n_V) \\\n",
    "    * (rho1_top - rho1_bot) + rho1_bot\n",
    "\n",
-    "\n",
    "noise_smooth_width = 10.0\n",
    "coords = np.mgrid[0:ROI_edge, 0:ROI_edge, 0:1]\n",
    "coords_flat = np.reshape(coords,[3, n_V]).T\n",
@@ -342,6 +340,14 @@
    "# The data to be fed to the program.\n",
    "\n",
    "\n",
+    "fig = plt.figure(num=None, figsize=(4, 4), dpi=100)\n",
+    "plt.pcolor(np.reshape(snr, [ROI_edge, ROI_edge]))\n",
+    "plt.colorbar()\n",
+    "ax = plt.gca()\n",
+    "ax.set_aspect(1)\n",
+    "plt.title('pseudo-SNR in a square \"ROI\"')\n",
+    "plt.show()\n",
+    "\n",
    "idx = np.argmin(np.abs(snr - np.median(snr)))\n",
    "# choose a voxel of medium level SNR.\n",
    "fig = plt.figure(num=None, figsize=(12, 4), dpi=150,\n",
@@ -357,24 +363,35 @@
    "fig = plt.figure(num=None, figsize=(12, 4), dpi=150,\n",
    "                 facecolor='w', edgecolor='k')\n",
    "data_plot, = plt.plot(Y[:,idx],'b')\n",
-    "plt.legend([data_plot], ['observed data'])\n",
+    "plt.legend([data_plot], ['observed data of the voxel'])\n",
    "plt.xlabel('time')\n",
    "plt.show()\n",
    "\n",
-    "fig = plt.figure(num=None, figsize=(4, 4), dpi=100)\n",
-    "plt.pcolor(np.reshape(snr, [ROI_edge, ROI_edge]))\n",
-    "plt.colorbar()\n",
-    "ax = plt.gca()\n",
-    "ax.set_aspect(1)\n",
-    "plt.title('pseudo-SNR in a square \"ROI\"')\n",
-    "plt.show()"
+    "idx = np.argmax(snr)\n",
+    "# display the voxel with the highest level of SNR.\n",
+    "fig = plt.figure(num=None, figsize=(12, 4), dpi=150,\n",
+    "                 facecolor='w', edgecolor='k')\n",
+    "noise_plot, = plt.plot(noise[:,idx],'g')\n",
+    "signal_plot, = plt.plot(signal[:,idx],'r')\n",
+    "plt.legend([noise_plot, signal_plot], ['noise', 'signal'])\n",
+    "plt.title('simulated data in the voxel with the highest'\n",
+    "          ' pseudo-SNR of {}'.format(snr[idx]))\n",
+    "plt.xlabel('time')\n",
+    "plt.show()\n",
+    "\n",
+    "fig = plt.figure(num=None, figsize=(12, 4), dpi=150,\n",
+    "                 facecolor='w', edgecolor='k')\n",
+    "data_plot, = plt.plot(Y[:,idx],'b')\n",
+    "plt.legend([data_plot], ['observed data of the voxel'])\n",
+    "plt.xlabel('time')\n",
+    "plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "#### The reason that the pseudo-SNR in the example voxel is not too small, while the signal looks much smaller is because we happen to have low amplitudes in our design matrix. The true SNR depends on both the amplitudes in design matrix and the pseudo-SNR. Therefore, be aware that pseudo-SNR does not directly reflects how much signal the data have, but rather a map indicating the relative strength of signal in differerent voxels.\n",
+    "#### The reason that the pseudo-SNRs in the example voxels are not too small, while the signal looks much smaller, is that we happen to have low amplitudes in our design matrix. The true SNR depends on both the amplitudes in the design matrix and the pseudo-SNR. Therefore, be aware that pseudo-SNR does not directly reflect how much signal the data have; rather, it is a map indicating the relative strength of signal in different voxels.\n",
    "#### When you have multiple runs, the noise won't be correlated between runs. Therefore, you should tell BRSA when is the onset of each scan. \n",
    "#### Note that the data (variable Y above) you feed to BRSA is the concatenation of data from all runs along the time dimension, as a 2-D matrix of time x space"
   ]
  },
@@ -396,8 +413,10 @@
   "metadata": {},
   "source": [
    "# Fit Bayesian RSA to our simulated data\n",
-    "n_nureg tells the model how many principal components to keep from the residual as nuisance regressors in order to account for spatial correlation in noise.\n",
-    "The nuisance regressors in typical fMRI analysis (such as head motion signal) are replaced by principal components estimated from residuals after subtracting task-related response. If you prefer not using this approach based on principal components of residuals, you can set auto_nuisance=False, and optionally provide your own nuisance regressors"
+    "### The data should be z-scored along the time dimension. Otherwise, voxels with high noise variance can dominate the estimation of the spatially shared noise components.\n",
+    "\n",
+    "The nuisance regressors in typical fMRI analysis (such as head motion signals) are replaced by principal components estimated from the residuals after subtracting the task-related response. n_nureg tells the model how many principal components to keep from the residuals as nuisance regressors, in order to account for spatial correlation in noise.\n",
+    "If you prefer not to use this approach based on principal components of the residuals, you can set auto_nuisance=False, and optionally provide your own nuisance regressors as the nuisance argument to BRSA.fit()"
   ]
  },
@@ -408,6 +427,10 @@
   },
   "outputs": [],
   "source": [
+    "Y_z = scipy.stats.zscore(Y, axis=0)\n",
+    "Y_std = np.std(Y, axis=0)\n",
+    "# Z-scoring the data\n",
+    "\n",
    "brsa = BRSA(GP_space=True, GP_inten=True,\n",
    "            n_nureg=10)\n",
    "# Initiate an instance, telling it\n",
@@ -415,7 +438,7 @@
    "# over both space and intensity.\n",
    "\n",
    "\n",
-    "brsa.fit(X=Y, design=design.design_task,\n",
+    "brsa.fit(X=Y_z, design=design.design_task,\n",
    "         coords=coords_flat, inten=inten, scan_onsets=scan_onsets)\n",
    "# The data to fit should be given to the argument X.\n",
    "# Design matrix goes to design. And so on.\n"
   ]
@@ -477,7 +500,7 @@
   "source": [
    "regressor = np.insert(design.design_task,\n",
    "                      0, 1, axis=1)\n",
-    "betas_point = np.linalg.lstsq(regressor, Y)[0]\n",
+    "betas_point = np.linalg.lstsq(regressor, Y_z)[0]\n",
    "point_corr = np.corrcoef(betas_point[1:, :])\n",
    "point_cov = np.cov(betas_point[1:, :])\n",
    "fig = plt.figure(num=None, figsize=(4, 4), dpi=100)\n",
@@ -578,14 +601,6 @@
   },
   "outputs": [],
   "source": [
-    "plt.scatter(noise_level * np.sqrt(0.1/1.1), brsa.sigma_)\n",
-    "plt.xlabel('true \"independent\" noise level')\n",
-    "plt.ylabel('recovered \"independent\" noise level')\n",
-    "ax = plt.gca()\n",
-    "ax.set_aspect(1)\n",
-    "ax.set_xticks(np.arange(0.1,0.7,0.1))\n",
-    "ax.set_yticks(np.arange(0.1,0.7,0.1))\n",
-    "plt.show()\n",
    "\n",
    "plt.scatter(rho1, brsa.rho_)\n",
    "plt.xlabel('true AR(1) coefficients')\n",
@@ -619,19 +634,17 @@
   },
   "outputs": [],
   "source": [
-    "plt.scatter(betas_simulated, brsa.beta_)\n",
+    "plt.scatter(betas_simulated / Y_std, brsa.beta_)\n",
    "plt.xlabel('true betas (response amplitudes)')\n",
    "plt.ylabel('recovered betas by Bayesian RSA')\n",
    "ax = plt.gca()\n",
-    "ax.set_aspect(1)\n",
    "plt.show()\n",
    "\n",
    "\n",
-    "plt.scatter(betas_simulated, betas_point[1:, :])\n",
+    "plt.scatter(betas_simulated / Y_std, betas_point[1:, :])\n",
    "plt.xlabel('true betas (response amplitudes)')\n",
    "plt.ylabel('recovered betas by simple regression')\n",
    "ax = plt.gca()\n",
-    "ax.set_aspect(1)\n",
    "plt.show()"
   ]
  },
@@ -651,7 +664,7 @@
   },
   "outputs": [],
   "source": [
-    "u, s, v = np.linalg.svd(noise)\n",
+    "u, s, v = np.linalg.svd(Y_z - signal / Y_std)\n",
    "plt.plot(s)\n",
    "plt.xlabel('principal component')\n",
    "plt.ylabel('singular value of simulated noise')\n",
@@ -664,18 +677,8 @@
    "\n",
    "plt.pcolor(np.reshape(brsa.beta0_[0,:], [ROI_edge, ROI_edge]))\n",
    "plt.title('Weights of the first recovered principal component in noise')\n",
-    "plt.show()\n",
-    "print(brsa.beta0_.shape)"
+    "plt.show()"
   ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {
-   "collapsed": false
-  },
-  "outputs": [],
-  "source": []
  }
 ],
 "metadata": {
diff --git a/tests/reprsimil/test_brsa.py b/tests/reprsimil/test_brsa.py
index cfb832434..ad25e2d53 100755
--- a/tests/reprsimil/test_brsa.py
+++ b/tests/reprsimil/test_brsa.py
@@ -40,8 +40,8 @@ def test_fit():
     file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
     # Load an example design matrix
     design = utils.ReadDesign(fname=file_path)
-    
-    
+
+
     # concatenate it by 4 times, mimicking 4 runs of itenditcal timing
     design.design_task = np.tile(design.design_task[:,:-1],[4,1])
     design.n_TR = design.n_TR * 4
@@ -336,7 +336,7 @@ def test_gradient():
                                          l_idx, n_C, n_T, n_V, n_run,
                                          n_base, idx_param_sing)[0],
         param0_sing, vec)
-    assert np.isclose(dd, np.dot(deriv0, vec), rtol=0.01), 'gradient of singpara wrt Cholesky is incorrect'
+    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), 'gradient of singpara wrt Cholesky is incorrect'

    # We test the gradient to a1
    vec = np.zeros(np.size(param0_sing))
@@ -347,7 +347,8 @@
                                          l_idx, n_C, n_T, n_V, n_run,
                                          n_base, idx_param_sing)[0],
         param0_sing, vec)
-    assert np.isclose(dd, np.dot(deriv0, vec), rtol=0.01), 'gradient of singpara wrt a1 is incorrect'
+    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), 'gradient of singpara wrt a1 is incorrect'
+
    # log likelihood and derivative of the fitU function.
@@ -365,7 +366,18 @@
                                             XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
                                             np.log(snr)*2, l_idx, n_C, n_T, n_V, n_run, n_base,
                                             idx_param_fitU, n_C)[0], param0_fitU, vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitU wrt to AR(1) coefficient incorrect'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitU wrt AR(1) coefficient incorrect'
+
+    # We test if the numerical and analytical gradient wrt the first element of Cholesky factor agree
+    vec = np.zeros(np.size(param0_fitU))
+    vec[idx_param_fitU['Cholesky'][0]] = 1
+    dd = nd.directionaldiff(lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
+                                                                   XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
+                                                                   XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
+                                                                   np.log(snr)*2, l_idx, n_C, n_T, n_V, n_run, n_base,
+                                                                   idx_param_fitU, n_C)[0], param0_fitU, vec)
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitU wrt Cholesky factor incorrect'
+
    # We test if the numerical and analytical gradient wrt to the first element of Cholesky factor is correct
    vec = np.zeros(np.size(param0_fitU))
@@ -385,7 +397,8 @@
                                             XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
                                             np.log(snr)*2, l_idx, n_C, n_T, n_V, n_run, n_base,
                                             idx_param_fitU, n_C)[0], param0_fitU, vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitU incorrect'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitU incorrect'
+
    # We test the gradient of _fitV wrt to log(SNR^2) assuming no GP prior.
    X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
@@ -420,7 +433,7 @@
                                 l_idx, n_C, n_T, n_V, n_run, n_base,
                                 idx_param_fitV, n_C, False, False)[0],
         param0_fitV[idx_param_fitV['log_SNR2']], vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitV wrt log(SNR2) incorrect for model without GP'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitV wrt log(SNR2) incorrect for model without GP'

    # We test the gradient of _fitV wrt to log(SNR^2) assuming GP prior.
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitV(param0_fitV, X0TAX0, XTAX0, X0TAY,
@@ -440,7 +453,7 @@
                                 idx_param_fitV, n_C, True, True,
                                 dist2, inten_diff2, 100, 100)[0],
         param0_fitV, vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitV srt log(SNR2) incorrect for model with GP'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitV wrt log(SNR2) incorrect for model with GP'

    # We test the graident wrt spatial length scale parameter of GP prior
    vec = np.zeros(np.size(param0_fitV))
@@ -453,7 +466,7 @@
                                 idx_param_fitV, n_C, True, True,
                                 dist2, inten_diff2, 100, 100)[0],
         param0_fitV, vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitV wrt spatial length scale of GP incorrect'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitV wrt spatial length scale of GP incorrect'

    # We test the graident wrt intensity length scale parameter of GP prior
    vec = np.zeros(np.size(param0_fitV))
@@ -466,7 +479,7 @@
                                 idx_param_fitV, n_C, True, True,
                                 dist2, inten_diff2, 100, 100)[0],
         param0_fitV, vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitV wrt intensity length scale of GP incorrect'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitV wrt intensity length scale of GP incorrect'

    # We test the graident on a random direction
    vec = np.random.randn(np.size(param0_fitV))
@@ -479,4 +492,4 @@
                                 idx_param_fitV, n_C, True, True,
                                 dist2, inten_diff2, 100, 100)[0],
         param0_fitV, vec)
-    assert np.isclose(dd, np.dot(deriv0,vec), rtol=0.01), 'gradient of fitV incorrect'
+    assert np.isclose(dd, np.dot(deriv0,vec), rtol=1e-5), 'gradient of fitV incorrect'
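
A note on the `np.linalg.solve` line added to `_calc_LL` in brsa.py above: solving `LAMBDA_i` against a broadcast identity inverts all per-voxel `rank x rank` blocks in a single batched call, rather than looping over voxels with `np.linalg.inv`. A minimal sketch of the idiom, with made-up sizes standing in for BRSA's space and rank dimensions:

```python
import numpy as np

n_V, rank = 100, 4  # hypothetical: number of voxels, rank of the shared covariance
rng = np.random.default_rng(0)
A = rng.standard_normal((n_V, rank, rank))
# One symmetric positive-definite matrix per voxel, playing the role of LAMBDA_i.
LAMBDA_i = np.einsum('vij,vkj->vik', A, A) + np.eye(rank)

# Broadcasting the identity to shape (1, rank, rank) makes np.linalg.solve
# treat the leading axis as a batch and return each block's inverse.
LAMBDA = np.linalg.solve(LAMBDA_i, np.identity(rank)[None, :, :])

# Same result as inverting each block separately, in one vectorized call.
assert np.allclose(LAMBDA, np.linalg.inv(LAMBDA_i))
```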
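On the z-scoring the notebook now performs before `BRSA.fit`: standardizing each voxel's time series keeps high-noise voxels from dominating the estimate of the spatially shared noise components, and it rescales the recovered betas, which is why the notebook divides `betas_simulated` by `Y_std` before comparing. A small sketch with toy data (the sizes and random generator here are illustrative, not taken from the notebook):

```python
import numpy as np
import scipy.stats

n_T, n_V = 200, 50  # hypothetical: time points x voxels
rng = np.random.default_rng(1)
# Voxels with very different noise scales, like raw fMRI data.
Y = rng.standard_normal((n_T, n_V)) * rng.uniform(0.5, 3.0, size=n_V)

Y_z = scipy.stats.zscore(Y, axis=0)  # zero mean, unit variance per voxel
Y_std = np.std(Y, axis=0)            # kept to map betas back to the raw scale

assert np.allclose(Y_z.std(axis=0), 1.0)
# A beta estimated on Y_z equals the raw-scale beta divided by that voxel's
# Y_std, so true betas must be scaled by 1 / Y_std before comparison.
```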
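The tightened `rtol=1e-5` asserts in test_brsa.py all follow one pattern: compare a numerical directional derivative from `numdifftools` against the projection of the analytic gradient onto the chosen direction. A self-contained sketch of that pattern on a toy objective (the `loglike` function here is a stand-in for the `_loglike_*` methods, which return a `(log-likelihood, gradient)` pair):

```python
import numpy as np
import numdifftools as nd

def loglike(param):
    """Toy objective returning (value, analytic gradient)."""
    ll = -0.5 * np.sum(param ** 2) + np.sum(np.sin(param))
    grad = -param + np.cos(param)
    return ll, grad

param0 = np.array([0.3, -1.2, 0.7])
_, deriv0 = loglike(param0)

# Directional derivative along one coordinate vs. the analytic gradient.
vec = np.zeros(param0.size)
vec[0] = 1.0
dd = nd.directionaldiff(lambda x: loglike(x)[0], param0, vec)
assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5)

# A unit-norm random direction exercises all partial derivatives at once.
vec = np.random.randn(param0.size)
vec /= np.linalg.norm(vec)
dd = nd.directionaldiff(lambda x: loglike(x)[0], param0, vec)
assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5)
```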