diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..b495d6aa --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,20 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the doc/ directory with Sphinx +sphinx: + configuration: doc/source/conf.py + +# Optionally build your docs in additional formats such as PDF +formats: + - pdf + +# Optionally set the version of Python and requirements required to build your docs +python: + version: 3.8 + install: + - requirements: doc/requirements.txt diff --git a/README.md b/README.md index caf36c18..4b89eaae 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ # Predictive Clinical Neuroscience Toolkit -Predictive Clinical Neuroscience software toolkit (formerly nispat). Methods for normative modelling, spatial statistics and pattern recognition. - [![Gitter](https://badges.gitter.im/predictive-clinical-neuroscience/community.svg)](https://gitter.im/predictive-clinical-neuroscience/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Documentation Status](https://readthedocs.org/projects/pcntoolkit/badge/?version=latest)](https://pcntoolkit.readthedocs.io/en/latest/?badge=latest) +Predictive Clinical Neuroscience software toolkit (formerly nispat). + +Methods for normative modelling, spatial statistics and pattern recognition. Documentation, including tutorials, can be found on [readthedocs](https://pcntoolkit.readthedocs.io/en/latest/). Click on the docs button above to visit the site. + ## Basic installation (on a local machine) i) install anaconda3 ii) create environment with "conda create --name " iii) activate environment by "source activate " iv) install required conda packages @@ -28,7 +30,7 @@ conda --version Create a conda environment in a shared location ``` -conda create -y python==3.7.7 numpy mkl blas --prefix=/shared/conda/ +conda create -y python==3.8.3 numpy mkl blas --prefix=/shared/conda/ ``` Activate the conda environment diff --git a/doc/Makefile b/doc/Makefile index 6a1cdc10..03022734 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -5,14 +5,15 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = +SOURCEDIR = source BUILDDIR = build # Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) # the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) .PHONY: help help: diff --git a/doc/build/doctrees/_templates/class.doctree b/doc/build/doctrees/_templates/class.doctree new file mode 100644 index 00000000..0aa5ab41 Binary files /dev/null and b/doc/build/doctrees/_templates/class.doctree differ diff --git a/doc/build/doctrees/_templates/function.doctree b/doc/build/doctrees/_templates/function.doctree new file mode 100644 index 00000000..97d2460d Binary files /dev/null and b/doc/build/doctrees/_templates/function.doctree differ diff --git a/doc/build/doctrees/environment.pickle b/doc/build/doctrees/environment.pickle index 8e93c8ce..f873bdf0 100644 Binary files a/doc/build/doctrees/environment.pickle and b/doc/build/doctrees/environment.pickle differ diff --git a/doc/build/doctrees/index.doctree b/doc/build/doctrees/index.doctree index 76d2a258..ee8abd94 100644 Binary files a/doc/build/doctrees/index.doctree and b/doc/build/doctrees/index.doctree differ diff --git a/doc/build/doctrees/modindex.doctree b/doc/build/doctrees/modindex.doctree index 7b205d7c..def64f3d 100644 Binary files a/doc/build/doctrees/modindex.doctree and b/doc/build/doctrees/modindex.doctree differ diff --git a/doc/build/doctrees/pages/FAQs.doctree b/doc/build/doctrees/pages/FAQs.doctree new file mode 100644 index 00000000..32fbb0fa Binary files /dev/null and b/doc/build/doctrees/pages/FAQs.doctree differ diff --git a/doc/build/doctrees/pages/_templates/class.doctree b/doc/build/doctrees/pages/_templates/class.doctree new file mode 100644 index 00000000..9cce95e2 Binary files /dev/null and b/doc/build/doctrees/pages/_templates/class.doctree differ diff --git a/doc/build/doctrees/pages/_templates/function.doctree b/doc/build/doctrees/pages/_templates/function.doctree new file mode 100644 index 00000000..de0ea542 Binary files /dev/null and b/doc/build/doctrees/pages/_templates/function.doctree differ diff --git a/doc/build/doctrees/pages/acknowledgements.doctree b/doc/build/doctrees/pages/acknowledgements.doctree new file mode 100644 index 00000000..5d2a9224 Binary files /dev/null and b/doc/build/doctrees/pages/acknowledgements.doctree differ diff --git a/doc/build/doctrees/pages/citing.doctree b/doc/build/doctrees/pages/citing.doctree new file mode 100644 index 00000000..ba773abf Binary files /dev/null and b/doc/build/doctrees/pages/citing.doctree differ diff --git a/doc/build/doctrees/pages/glossary.doctree b/doc/build/doctrees/pages/glossary.doctree new file mode 100644 index 00000000..1f5987e0 Binary files /dev/null and b/doc/build/doctrees/pages/glossary.doctree differ diff --git a/doc/build/doctrees/pages/installation.doctree b/doc/build/doctrees/pages/installation.doctree new file mode 100644 index 00000000..d0afce1e Binary files /dev/null and b/doc/build/doctrees/pages/installation.doctree differ diff --git a/doc/build/doctrees/pages/pcntoolkit_background.doctree b/doc/build/doctrees/pages/pcntoolkit_background.doctree new file mode 100644 index 00000000..885f2276 Binary files /dev/null and b/doc/build/doctrees/pages/pcntoolkit_background.doctree differ diff --git a/doc/build/doctrees/pages/references.doctree 
b/doc/build/doctrees/pages/references.doctree new file mode 100644 index 00000000..dbc775f5 Binary files /dev/null and b/doc/build/doctrees/pages/references.doctree differ diff --git a/doc/build/doctrees/pages/scripts.doctree b/doc/build/doctrees/pages/scripts.doctree new file mode 100644 index 00000000..ce0344d2 Binary files /dev/null and b/doc/build/doctrees/pages/scripts.doctree differ diff --git a/doc/build/doctrees/pages/tutorial_CPC2020.doctree b/doc/build/doctrees/pages/tutorial_CPC2020.doctree new file mode 100644 index 00000000..d8b930e8 Binary files /dev/null and b/doc/build/doctrees/pages/tutorial_CPC2020.doctree differ diff --git a/doc/build/doctrees/pages/tutorial_HBR.doctree b/doc/build/doctrees/pages/tutorial_HBR.doctree new file mode 100644 index 00000000..30ba12dd Binary files /dev/null and b/doc/build/doctrees/pages/tutorial_HBR.doctree differ diff --git a/doc/build/doctrees/pages/tutorial_ROIcorticalthickness.doctree b/doc/build/doctrees/pages/tutorial_ROIcorticalthickness.doctree new file mode 100644 index 00000000..d461556c Binary files /dev/null and b/doc/build/doctrees/pages/tutorial_ROIcorticalthickness.doctree differ diff --git a/doc/build/doctrees/pages/updates.doctree b/doc/build/doctrees/pages/updates.doctree new file mode 100644 index 00000000..48a29769 Binary files /dev/null and b/doc/build/doctrees/pages/updates.doctree differ diff --git a/doc/build/html/.buildinfo b/doc/build/html/.buildinfo index af3fd4c8..c59d51bf 100644 --- a/doc/build/html/.buildinfo +++ b/doc/build/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: dde6024847b5bf213ad9998ffa45db93 +config: 5392fc74b0cbc4bdad768b650de8eaca tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/doc/build/html/_images/math/136499ff23ae31f75bf05de819b869f7cb8675a7.png b/doc/build/html/_images/math/136499ff23ae31f75bf05de819b869f7cb8675a7.png new file mode 100644 index 00000000..016a6859 Binary files /dev/null and b/doc/build/html/_images/math/136499ff23ae31f75bf05de819b869f7cb8675a7.png differ diff --git a/doc/build/html/_images/nm_concept.png b/doc/build/html/_images/nm_concept.png new file mode 100644 index 00000000..367090c9 Binary files /dev/null and b/doc/build/html/_images/nm_concept.png differ diff --git a/doc/build/html/_images/nm_overview.png b/doc/build/html/_images/nm_overview.png new file mode 100644 index 00000000..b1d794d0 Binary files /dev/null and b/doc/build/html/_images/nm_overview.png differ diff --git a/doc/build/html/_images/nm_parallel.png b/doc/build/html/_images/nm_parallel.png new file mode 100644 index 00000000..8066968c Binary files /dev/null and b/doc/build/html/_images/nm_parallel.png differ diff --git a/doc/build/html/_images/nm_plot.jpeg b/doc/build/html/_images/nm_plot.jpeg new file mode 100644 index 00000000..009dff14 Binary files /dev/null and b/doc/build/html/_images/nm_plot.jpeg differ diff --git a/doc/build/html/_modules/bayesreg.html b/doc/build/html/_modules/bayesreg.html index 5112b8ee..1507a312 100644 --- a/doc/build/html/_modules/bayesreg.html +++ b/doc/build/html/_modules/bayesreg.html @@ -1,446 +1,790 @@ - - - - - - - bayesreg — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + bayesreg — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -

Source code for bayesreg

+from __future__ import print_function
+from __future__ import division
 
-
[docs]class BLR: - """Bayesian linear regression +import numpy as np +from scipy import optimize , linalg +from scipy.linalg import LinAlgError - Estimation and prediction of Bayesian linear regression models - Basic usage:: +
[docs]class BLR: + """Bayesian linear regression - B = BLR() - hyp = B.estimate(hyp0, X, y) - ys,s2 = B.predict(hyp, X, y, Xs) + Estimation and prediction of Bayesian linear regression models - where the variables are + Basic usage:: - :param hyp: vector of hyperparmaters. - :param X: N x D data array - :param y: 1D Array of targets (length N) - :param Xs: Nte x D array of test cases - :param hyp0: starting estimates for hyperparameter optimisation + B = BLR() + hyp = B.estimate(hyp0, X, y) + ys,s2 = B.predict(hyp, X, y, Xs) - :returns: * ys - predictive mean - * s2 - predictive variance + where the variables are - The hyperparameters are:: + :param hyp: vector of hyperparmaters. + :param X: N x D data array + :param y: 1D Array of targets (length N) + :param Xs: Nte x D array of test cases + :param hyp0: starting estimates for hyperparameter optimisation - hyp = ( log(beta), log(alpha) ) # hyp is a list or numpy array + :returns: * ys - predictive mean + * s2 - predictive variance - The implementation and notation mostly follows Bishop (2006). - The hyperparameter beta is the noise precision and alpha is the precision - over lengthscale parameters. This can be either a scalar variable (a - common lengthscale for all input variables), or a vector of length D (a - different lengthscale for each input variable, derived using an automatic - relevance determination formulation). These are estimated using conjugate - gradient optimisation of the marginal likelihood. + The hyperparameters are:: - Reference: - Bishop (2006) Pattern Recognition and Machine Learning, Springer + hyp = ( log(beta), log(alpha) ) # hyp is a list or numpy array - Written by A. Marquand - """ + The implementation and notation mostly follows Bishop (2006). + The hyperparameter beta is the noise precision and alpha is the precision + over lengthscale parameters. This can be either a scalar variable (a + common lengthscale for all input variables), or a vector of length D (a + different lengthscale for each input variable, derived using an automatic + relevance determination formulation). These are estimated using conjugate + gradient optimisation of the marginal likelihood. - def __init__(self, hyp=None, X=None, y=None, - n_iter=100, tol=1e-3, verbose=False, - var_groups=None, warp=None): + Reference: + Bishop (2006) Pattern Recognition and Machine Learning, Springer - self.hyp = np.nan - self.nlZ = np.nan - self.tol = tol # not used at present - self.n_iter = n_iter - self.verbose = verbose - self.var_groups = var_groups - if self.var_groups is not None: - self.var_ids = set(self.var_groups) - self.var_ids = sorted(list(self.var_ids)) + Written by A. 
Marquand + """ - # set up warped likelihood - if warp is None: - self.warp = None - self.n_warp_param = 0 - else: - self.warp = warp - self.n_warp_param = warp.get_n_params() - self.gamma = None + def __init__(self, **kwargs): + # parse arguments + n_iter = kwargs.get('n_iter', 100) + tol = kwargs.get('tol', 1e-3) + verbose = kwargs.get('verbose', False) + var_groups = kwargs.get('var_groups', None) + var_covariates = kwargs.get('var_covariates', None) + warp = kwargs.get('warp', None) + warp_reparam = kwargs.get('warp_reparam', False) + + if var_groups is not None and var_covariates is not None: + raise ValueError("var_covariates and var_groups cannot both be used") + + # basic parameters + self.hyp = np.nan + self.nlZ = np.nan + self.tol = tol # not used at present + self.n_iter = n_iter + self.verbose = verbose + self.var_groups = var_groups + if var_covariates is not None: + self.hetero_var = True + else: + self.hetero_var = False + if self.var_groups is not None: + self.var_ids = set(self.var_groups) + self.var_ids = sorted(list(self.var_ids)) + + # set up warped likelihood + if verbose: + print('warp:', warp, 'warp_reparam:', warp_reparam) + if warp is None: + self.warp = None + self.n_warp_param = 0 + else: + self.warp = warp + self.n_warp_param = warp.get_n_params() + self.warp_reparam = warp_reparam + + self.gamma = None - def _parse_hyps(self, hyp, X): + def _parse_hyps(self, hyp, X, Xv=None): - N = X.shape[0] + N = X.shape[0] - # hyperparameters - if self.var_groups is None: - beta = np.asarray([np.exp(hyp[0])]) # noise precision - self.Lambda_n = np.diag(np.ones(N)*beta) - self.Sigma_n = np.diag(np.ones(N)/beta) - else: - beta = np.exp(hyp[0:len(self.var_ids)]) - beta_all = np.ones(N) - for v in range(len(self.var_ids)): - beta_all[self.var_groups == self.var_ids[v]] = beta[v] - self.Lambda_n = np.diag(beta_all) - self.Sigma_n = np.diag(1/beta_all) + # noise precision + if Xv is not None: + if len(Xv.shape) == 1: + Dv = 1 + Xv = Xv[:, np.newaxis] + else: + Dv = Xv.shape[1] + w_d = np.asarray(hyp[0:Dv]) + beta = np.exp(Xv.dot(w_d)) + n_lik_param = len(w_d) + elif self.var_groups is not None: + beta = np.exp(hyp[0:len(self.var_ids)]) + n_lik_param = len(beta) + else: + beta = np.asarray([np.exp(hyp[0])]) + n_lik_param = len(beta) + + # parameters for warping the likelhood function + if self.warp is not None: + gamma = hyp[n_lik_param:(n_lik_param + self.n_warp_param)] + n_lik_param += self.n_warp_param + else: + gamma = None + + # precision for the coefficients + if isinstance(beta, list) or type(beta) is np.ndarray: + alpha = np.exp(hyp[n_lik_param:]) + else: + alpha = np.exp(hyp[1:]) + + # reparameterise the warp (WarpSinArcsinh only) + if self.warp is not None and self.warp_reparam: + delta = np.exp(gamma[1]) + beta = beta/(delta**2) + + # Create precision matrix from noise precision + if Xv is not None: + self.lambda_n_vec = beta + elif self.var_groups is not None: + beta_all = np.ones(N) + for v in range(len(self.var_ids)): + beta_all[self.var_groups == self.var_ids[v]] = beta[v] + self.lambda_n_vec = beta_all + else: + self.lambda_n_vec = np.ones(N)*beta + + return beta, alpha, gamma - # parameters for warping the likelhood function - n_lik_param = len(beta) - if self.warp is not None: - gamma = hyp[n_lik_param:(n_lik_param + self.n_warp_param)] - n_lik_param += self.n_warp_param - else: - gamma = None - - # precision for the coefficients - if isinstance(beta, list) or type(beta) is np.ndarray: - alpha = np.exp(hyp[n_lik_param:]) - else: - alpha = np.exp(hyp[1:]) - - return 
beta, alpha, gamma +
[docs] def post(self, hyp, X, y, Xv=None): + """ Generic function to compute posterior distribution. + + This function will save the posterior mean and precision matrix as + self.m and self.A and will also update internal parameters (e.g. + N, D and the prior covariance (Sigma_a) and precision (Lambda_a). + + :param hyp: hyperparameter vector + :param X: covariates + :param y: responses + :param Xv: covariates for heteroskedastic noise + """ + + N = X.shape[0] + if len(X.shape) == 1: + D = 1 + else: + D = X.shape[1] + + if (hyp == self.hyp).all() and hasattr(self, 'N'): + print("hyperparameters have not changed, exiting") + return -
[docs] def post(self, hyp, X, y): - """ Generic function to compute posterior distribution. - - This function will save the posterior mean and precision matrix as - self.m and self.A and will also update internal parameters (e.g. - N, D and the prior covariance (Sigma_a) and precision (Lambda_a). - """ - - N = X.shape[0] - if len(X.shape) == 1: - D = 1 - else: - D = X.shape[1] - - if (hyp == self.hyp).all() and hasattr(self, 'N'): - print("hyperparameters have not changed, exiting") - return + beta, alpha, gamma = self._parse_hyps(hyp, X, Xv) + + if self.verbose: + print("estimating posterior ... | hyp=", hyp) + + # prior variance + if len(alpha) == 1 or len(alpha) == D: + self.Sigma_a = np.diag(np.ones(D))/alpha + self.Lambda_a = np.diag(np.ones(D))*alpha + else: + raise ValueError("hyperparameter vector has invalid length") + + # compute posterior precision and mean + # this is equivalent to the following operation but makes much more + # efficient use of memory by avoiding the need to store Lambda_n + # + # self.A = X.T.dot(self.Lambda_n).dot(X) + self.Lambda_a + # self.m = linalg.solve(self.A, X.T, + # check_finite=False).dot(self.Lambda_n).dot(y) - beta, alpha, gamma = self._parse_hyps(hyp, X) - - if self.verbose: - print("estimating posterior ... | hyp=", hyp) - - # prior variance - if len(alpha) == 1 or len(alpha) == D: - self.Sigma_a = np.diag(np.ones(D))/alpha - self.Lambda_a = np.diag(np.ones(D))*alpha - else: - raise ValueError("hyperparameter vector has invalid length") - - # compute posterior precision and mean - self.A = X.T.dot(self.Lambda_n).dot(X) + self.Lambda_a - self.m = linalg.solve(self.A, X.T, - check_finite=False).dot(self.Lambda_n).dot(y) - #self.m = linalg.lstsq(self.A, X.T, - # check_finite=False)[0].dot(self.Lambda_n).dot(y) - - # save stuff - self.N = N - self.D = D - self.hyp = hyp
- -
[docs] def loglik(self, hyp, X, y): - """ Function to compute compute log (marginal) likelihood """ - - # hyperparameters (alpha not needed) - beta, alpha, gamma = self._parse_hyps(hyp, X) - - # warp the likelihood? - if self.warp is not None: - if self.verbose: - print('warping input...') - y_unwarped = y - y = self.warp.f(y, gamma) + XtLambda_n = X.T*self.lambda_n_vec + self.A = XtLambda_n.dot(X) + self.Lambda_a + invAXt = linalg.solve(self.A, X.T, check_finite=False) + self.m = (invAXt*self.lambda_n_vec).dot(y) + + # save stuff + self.N = N + self.D = D + self.hyp = hyp
+ +
[docs] def loglik(self, hyp, X, y, Xv=None): + """ Function to compute compute log (marginal) likelihood """ + + # hyperparameters (alpha not needed) + beta, alpha, gamma = self._parse_hyps(hyp, X, Xv) + + # warp the likelihood? + if self.warp is not None: + if self.verbose: + print('warping input...') + y_unwarped = y + y = self.warp.f(y, gamma) - # load posterior and prior covariance - if (hyp != self.hyp).any() or not(hasattr(self, 'A')): - try: - self.post(hyp, X, y) - except ValueError: - print("Warning: Estimation of posterior distribution failed") - nlZ = 1/np.finfo(float).eps - return nlZ + # load posterior and prior covariance + if (hyp != self.hyp).any() or not(hasattr(self, 'A')): + try: + self.post(hyp, X, y, Xv) + except ValueError: + print("Warning: Estimation of posterior distribution failed") + nlZ = 1/np.finfo(float).eps + return nlZ - try: - # compute the log determinants in a numerically stable way - logdetA = 2*sum(np.log(np.diag(np.linalg.cholesky(self.A)))) - except (ValueError, LinAlgError): - print("Warning: Estimation of posterior distribution failed") - nlZ = 1/np.finfo(float).eps - return nlZ - - logdetSigma_a = sum(np.log(np.diag(self.Sigma_a))) # diagonal - logdetSigma_n = sum(np.log(np.diag(self.Sigma_n))) - - # compute negative marginal log likelihood - nlZ = -0.5 * (-self.N*np.log(2*np.pi) - - logdetSigma_n - - logdetSigma_a - - (y-X.dot(self.m)).T.dot(self.Lambda_n).dot(y-X.dot(self.m)) - - self.m.T.dot(self.Lambda_a).dot(self.m) - - logdetA - ) + try: + # compute the log determinants in a numerically stable way + logdetA = 2*sum(np.log(np.diag(np.linalg.cholesky(self.A)))) + except (ValueError, LinAlgError): + print("Warning: Estimation of posterior distribution failed") + nlZ = 1/np.finfo(float).eps + return nlZ + + logdetSigma_a = sum(np.log(np.diag(self.Sigma_a))) # diagonal + logdetSigma_n = sum(np.log(1/self.lambda_n_vec)) - if self.warp is not None: - # add in the Jacobian - nlZ = nlZ - sum(np.log(self.warp.df(y_unwarped, gamma))) - - # make sure the output is finite to stop the minimizer getting upset - if not np.isfinite(nlZ): - nlZ = 1/np.finfo(float).eps - - if self.verbose: - print("nlZ= ", nlZ, " | hyp=", hyp) + # compute negative marginal log likelihood + X_y_t_sLambda_n = (y-X.dot(self.m))*np.sqrt(self.lambda_n_vec) + nlZ = -0.5 * (-self.N*np.log(2*np.pi) - + logdetSigma_n - + logdetSigma_a - + X_y_t_sLambda_n.T.dot(X_y_t_sLambda_n) - + self.m.T.dot(self.Lambda_a).dot(self.m) - + logdetA + ) + + + if self.warp is not None: + # add in the Jacobian + nlZ = nlZ - sum(np.log(self.warp.df(y_unwarped, gamma))) - self.nlZ = nlZ - return nlZ
+ # make sure the output is finite to stop the minimizer getting upset + if not np.isfinite(nlZ): + nlZ = 1/np.finfo(float).eps -
[docs] def dloglik(self, hyp, X, y): - """ Function to compute derivatives """ + if self.verbose: + print("nlZ= ", nlZ, " | hyp=", hyp) - # hyperparameters - beta, alpha, gamma = self._parse_hyps(hyp, X) + self.nlZ = nlZ + return nlZ
+ +
[docs] def penalized_loglik(self, hyp, X, y, Xv=None, l=0.1, norm='L1'): + """ Function to compute the penalized log (marginal) likelihood + + :param hyp: hyperparameter vector + :param X: covariates + :param y: responses + :param Xv: covariates for heteroskedastic noise + :param l: regularisation penalty + :param norm: type of regulariser (L1 or L2) + """ + + if norm.lower() == 'l1': + L = self.loglik(hyp, X, y, Xv) + l * sum(abs(hyp)) + elif norm.lower() == 'l2': + L = self.loglik(hyp, X, y, Xv) + l * sum(np.sqrt(hyp**2)) + else: + print("Requested penalty not recognized, choose between 'L1' or 'L2'.") + return L
+ +
[docs] def dloglik(self, hyp, X, y, Xv=None): + """ Function to compute derivatives """ + + # hyperparameters + beta, alpha, gamma = self._parse_hyps(hyp, X, Xv) - if self.warp is not None: - raise ValueError('optimization with derivatives is not yet ' + \ - 'supported for warped liklihood') - - # load posterior and prior covariance - if (hyp != self.hyp).any() or not(hasattr(self, 'A')): - try: - self.post(hyp, X, y) - except ValueError: - print("Warning: Estimation of posterior distribution failed") - dnlZ = np.sign(self.dnlZ) / np.finfo(float).eps - return dnlZ - - # precompute re-used quantities to maximise speed - # todo: revise implementation to use Cholesky throughout - # that would remove the need to explicitly compute the inverse - S = np.linalg.inv(self.A) # posterior covariance - SX = S.dot(X.T) - XLn = X.T.dot(self.Lambda_n) - XLny = XLn.dot(y) - SXLny = S.dot(XLny) - XLnXm = XLn.dot(X).dot(self.m) + if self.warp is not None: + raise ValueError('optimization with derivatives is not yet ' + \ + 'supported for warped liklihood') + + # load posterior and prior covariance + if (hyp != self.hyp).any() or not(hasattr(self, 'A')): + try: + self.post(hyp, X, y, Xv) + except ValueError: + print("Warning: Estimation of posterior distribution failed") + dnlZ = np.sign(self.dnlZ) / np.finfo(float).eps + return dnlZ + + # precompute re-used quantities to maximise speed + # todo: revise implementation to use Cholesky throughout + # that would remove the need to explicitly compute the inverse + S = np.linalg.inv(self.A) # posterior covariance + SX = S.dot(X.T) + XLn = X.T*self.lambda_n_vec # = X.T.dot(self.Lambda_n) + XLny = XLn.dot(y) + SXLny = S.dot(XLny) + XLnXm = XLn.dot(X).dot(self.m) - # initialise derivatives - dnlZ = np.zeros(hyp.shape) - - # noise precision parameter(s) - for i in range(0, len(beta)): - # first compute derivative of Lambda_n with respect to beta - dL_n_vec = np.zeros(self.N) - if self.var_groups is None: - dL_n_vec = np.ones(self.N) - else: - dL_n_vec[np.where(self.var_groups == self.var_ids[i])[0]] = 1 - dLambda_n = np.diag(dL_n_vec) + # initialise derivatives + dnlZ = np.zeros(hyp.shape) + dnl2 = np.zeros(hyp.shape) + + # noise precision parameter(s) + for i in range(0, len(beta)): + # first compute derivative of Lambda_n with respect to beta + dL_n_vec = np.zeros(self.N) + if self.var_groups is None: + dL_n_vec = np.ones(self.N) + else: + dL_n_vec[np.where(self.var_groups == self.var_ids[i])[0]] = 1 + dLambda_n = np.diag(dL_n_vec) - # compute quantities used multiple times - XdLnX = X.T.dot(dLambda_n).dot(X) - dA = XdLnX + # compute quantities used multiple times + XdLnX = X.T.dot(dLambda_n).dot(X) + dA = XdLnX - # derivative of posterior parameters with respect to beta - b = -S.dot(dA).dot(SXLny) + SX.dot(dLambda_n).dot(y) + # derivative of posterior parameters with respect to beta + b = -S.dot(dA).dot(SXLny) + SX.dot(dLambda_n).dot(y) - # compute np.trace(self.Sigma_n.dot(dLambda_n)) efficiently - trSigma_ndLambda_n = sum(np.diag(self.Sigma_n)*np.diag(dLambda_n)) + # compute np.trace(self.Sigma_n.dot(dLambda_n)) efficiently + trSigma_ndLambda_n = sum((1/self.lambda_n_vec)*np.diag(dLambda_n)) + + # compute y.T.dot(Lambda_n) efficiently + ytLn = (y*self.lambda_n_vec).T - dnlZ[i] = - (0.5 * trSigma_ndLambda_n - - 0.5 * y.dot(dLambda_n).dot(y) + - y.dot(dLambda_n).dot(X).dot(self.m) + - y.T.dot(self.Lambda_n).dot(X).dot(b) - - 0.5 * self.m.T.dot(XdLnX).dot(self.m) - - b.T.dot(XLnXm) - - b.T.dot(self.Lambda_a).dot(self.m) - - 0.5 * np.trace(S.dot(dA)) - ) * beta[i] - 
- # scaling parameter(s) - for i in range(0, len(alpha)): - # first compute derivatives with respect to alpha - if len(alpha) == self.D: # are we using ARD? - dLambda_a = np.zeros((self.D, self.D)) - dLambda_a[i, i] = 1 - else: - dLambda_a = np.eye(self.D) - - F = dLambda_a - c = -S.dot(F).dot(SXLny) + # compute derivatives + dnlZ[i] = - (0.5 * trSigma_ndLambda_n - + 0.5 * y.dot(dLambda_n).dot(y) + + y.dot(dLambda_n).dot(X).dot(self.m) + + ytLn.dot(X).dot(b) - + 0.5 * self.m.T.dot(XdLnX).dot(self.m) - + b.T.dot(XLnXm) - + b.T.dot(self.Lambda_a).dot(self.m) - + 0.5 * np.trace(S.dot(dA)) + ) * beta[i] + + # scaling parameter(s) + for i in range(0, len(alpha)): + # first compute derivatives with respect to alpha + if len(alpha) == self.D: # are we using ARD? + dLambda_a = np.zeros((self.D, self.D)) + dLambda_a[i, i] = 1 + else: + dLambda_a = np.eye(self.D) + + F = dLambda_a + c = -S.dot(F).dot(SXLny) - # compute np.trace(self.Sigma_a.dot(dLambda_a)) efficiently - trSigma_adLambda_a = sum(np.diag(self.Sigma_a)*np.diag(dLambda_a)) + # compute np.trace(self.Sigma_a.dot(dLambda_a)) efficiently + trSigma_adLambda_a = sum(np.diag(self.Sigma_a)*np.diag(dLambda_a)) - dnlZ[i+len(beta)] = -(0.5* trSigma_adLambda_a + - XLny.T.dot(c) - - c.T.dot(XLnXm) - - c.T.dot(self.Lambda_a).dot(self.m) - - 0.5 * self.m.T.dot(F).dot(self.m) - - 0.5*np.trace(linalg.solve(self.A, F)) - ) * alpha[i] - - # make sure the gradient is finite to stop the minimizer getting upset - if not all(np.isfinite(dnlZ)): - bad = np.where(np.logical_not(np.isfinite(dnlZ))) - for b in bad: - dnlZ[b] = np.sign(self.dnlZ[b]) / np.finfo(float).eps - - if self.verbose: - print("dnlZ= ", dnlZ, " | hyp=", hyp) - - self.dnlZ = dnlZ - return dnlZ
- - # model estimation (optimization) -
[docs] def estimate(self, hyp0, X, y, optimizer='cg'): - """ Function to estimate the model """ - - if optimizer.lower() == 'cg': # conjugate gradients - out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y), - disp=True, gtol=self.tol, - maxiter=self.n_iter, full_output=1) - - elif optimizer.lower() == 'powell': # Powell's method - out = optimize.fmin_powell(self.loglik, hyp0, (X, y), - full_output=1) - else: - raise ValueError("unknown optimizer") - - self.hyp = out[0] - self.nlZ = out[1] - self.optimizer = optimizer - - return self.hyp
- -
[docs] def predict(self, hyp, X, y, Xs, var_groups_test=None): - """ Function to make predictions from the model """ - - if X is None or y is None: - # set hyperparameters. we can use an array of zeros because - beta, alpha, gamma = self._parse_hyps(hyp, np.zeros((self.N, 1))) - else: + dnlZ[i+len(beta)] = -(0.5* trSigma_adLambda_a + + XLny.T.dot(c) - + c.T.dot(XLnXm) - + c.T.dot(self.Lambda_a).dot(self.m) - + 0.5 * self.m.T.dot(F).dot(self.m) - + 0.5*np.trace(linalg.solve(self.A, F)) + ) * alpha[i] + + # make sure the gradient is finite to stop the minimizer getting upset + if not all(np.isfinite(dnlZ)): + bad = np.where(np.logical_not(np.isfinite(dnlZ))) + for b in bad: + dnlZ[b] = np.sign(self.dnlZ[b]) / np.finfo(float).eps + + if self.verbose: + print("dnlZ= ", dnlZ, " | hyp=", hyp) + + self.dnlZ = dnlZ + return dnlZ
+ + # model estimation (optimization) +
[docs] def estimate(self, hyp0, X, y, **kwargs): + """ Function to estimate the model + + :param hyp0: starting estimates for the hyperparameters + :param X: covariates + :param y: responses + :param optimizer: optimisation algorithm ('cg','powell','nelder-mead','l-bfgs-b') + """ + + optimizer = kwargs.get('optimizer','cg') + + # covariates for heteroskedastic noise + Xv = kwargs.get('var_covariates', None) + + # options for l-bfgs-b + l = kwargs.get('l', 0.1) + epsilon = kwargs.get('epsilon', 0.1) + norm = kwargs.get('norm', 'l2') + + if optimizer.lower() == 'cg': # conjugate gradients + out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y, Xv), + disp=True, gtol=self.tol, + maxiter=self.n_iter, full_output=1) + elif optimizer.lower() == 'powell': # Powell's method + out = optimize.fmin_powell(self.loglik, hyp0, (X, y, Xv), + full_output=1) + elif optimizer.lower() == 'nelder-mead': + out = optimize.fmin(self.loglik, hyp0, (X, y, Xv), + full_output=1) + elif optimizer.lower() == 'l-bfgs-b': + out = optimize.fmin_l_bfgs_b(self.penalized_loglik, x0=hyp0, + args=(X, y, Xv, l, norm), approx_grad=True, + epsilon=epsilon) + else: + raise ValueError("unknown optimizer") + + self.hyp = out[0] + self.nlZ = out[1] + self.optimizer = optimizer + + return self.hyp
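The `estimate` method above dispatches on the `optimizer` keyword; the `l-bfgs-b` route optimises the penalized marginal likelihood with an approximate gradient and the `l`, `norm` and `epsilon` options shown. A minimal sketch of the call patterns, assuming `B` is a `BLR` instance and `hyp0`, `X`, `y` are already defined (these names are placeholders, not part of the diff):

```python
# hedged sketch: B, hyp0, X and y are assumed to exist already
hyp = B.estimate(hyp0, X, y)                          # conjugate gradients (default)
hyp = B.estimate(hyp0, X, y, optimizer='powell')      # derivative-free
hyp = B.estimate(hyp0, X, y, optimizer='l-bfgs-b',    # penalized likelihood with
                 l=0.1, norm='L1', epsilon=0.1)       # an approximate gradient
```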
+ +
[docs] def predict(self, hyp, X, y, Xs, + var_groups_test=None, + var_covariates_test=None, **kwargs): + """ Function to make predictions from the model + + :param hyp: hyperparameter vector + :param X: covariates for training data + :param y: responses for training data + :param Xs: covariates for test data + :param var_covariates_test: test covariates for heteroskedastic noise + + This always returns Gaussian predictions, i.e. + + :returns: * ys - predictive mean + * s2 - predictive variance + """ + + Xvs = var_covariates_test + if Xvs is not None and len(Xvs.shape) == 1: + Xvs = Xvs[:, np.newaxis] + + if X is None or y is None: + # set dummy hyperparameters + beta, alpha, gamma = self._parse_hyps(hyp, np.zeros((self.N, self.D)), Xvs) + else: - # set hyperparameters - beta, alpha, gamma = self._parse_hyps(hyp, X) + # set hyperparameters + beta, alpha, gamma = self._parse_hyps(hyp, X, Xvs) - # do we need to re-estimate the posterior? - if (hyp != self.hyp).any() or not(hasattr(self, 'A')): - # warp the likelihood? - if self.warp is not None: - if self.verbose: - print('warping input...') - y = self.warp.f(y, gamma) - self.post(hyp, X, y) - - N_test = Xs.shape[0] - - ys = Xs.dot(self.m) + # do we need to re-estimate the posterior? + if (hyp != self.hyp).any() or not(hasattr(self, 'A')): + raise(ValueError, 'posterior not properly estimated') + + N_test = Xs.shape[0] + + ys = Xs.dot(self.m) - if self.var_groups is None: - s2n = 1/beta - else: - if len(var_groups_test) != N_test: - raise(ValueError, 'Invalid variance groups for test') - # separate variance groups - s2n = np.ones(N_test) - for v in range(len(self.var_ids)): - s2n[var_groups_test == self.var_ids[v]] = 1/beta[v] + if self.var_groups is not None: + if len(var_groups_test) != N_test: + raise(ValueError, 'Invalid variance groups for test') + # separate variance groups + s2n = np.ones(N_test) + for v in range(len(self.var_ids)): + s2n[var_groups_test == self.var_ids[v]] = 1/beta[v] + else: + s2n = 1/beta + + # compute xs.dot(S).dot(xs.T) avoiding computing off-diagonal entries + s2 = s2n + np.sum(Xs*linalg.solve(self.A, Xs.T).T, axis=1) - # compute xs.dot(S).dot(xs.T) avoiding computing off-diagonal entries - s2 = s2n + np.sum(Xs*linalg.solve(self.A, Xs.T).T, axis=1) + return ys, s2
+ +
[docs] def predict_and_adjust(self, hyp, X, y, Xs=None, + ys=None, + var_groups_test=None, + var_groups_adapt=None, **kwargs): + """ Function to transfer the model to a new site. This is done by + first making predictions on the adaptation data given by X, + adjusting by the residuals with respect to y. + + :param hyp: hyperparameter vector + :param X: covariates for adaptation (i.e. calibration) data + :param y: responses for adaptation data + :param Xs: covariate data (for which predictions should be adjusted) + :param ys: true response variables (to be adjusted) + :param var_groups_test: variance groups (e.g. sites) for test data + :param var_groups_adapt: variance groups for adaptation data + + There are two possible ways of using this function, depending on + whether ys or Xs is specified + + If ys is specified, this is applied directly to the data, which is + assumed to be in the input space (i.e. not warped). In this case + the adjusted true data points are returned in the same space + + Alternatively, Xs is specified, then the predictions are made and + adjusted. In this case the predictive variance are returned in the + warped (i.e. Gaussian) space. + + This function needs to know which sites are associated with which + data points, which provided by var_groups_xxx, which is a list or + array of scalar ids . + """ + + if ys is None: + if Xs is None: + raise ValueError('Either ys or Xs must be specified') + else: + N = Xs.shape[0] + else: + if len(ys.shape) < 1: + raise ValueError('ys is specified but has insufficent length') + N = ys.shape[0] + + if var_groups_test is None: + var_groups_test = np.ones(N) + var_groups_adapt = np.ones(X.shape[0]) + + ys_out = np.zeros(N) + s2_out = np.zeros(N) + for g in np.unique(var_groups_test): + idx_s = var_groups_test == g + idx_a = var_groups_adapt == g + + if sum(idx_a) < 2: + raise ValueError('Insufficient adaptation data to estimate variance') + + # Get predictions from old model on new data X + ys_ref, s2_ref = self.predict(hyp, None, None, X[idx_a,:]) + + # Subtract the predictions from true data to get the residuals + if self.warp is None: + residuals = ys_ref-y[idx_a] + else: + # Calculate the residuals in warped space + y_ref_ws = self.warp.f(y[idx_a], hyp[1:self.warp.get_n_params()+1]) + residuals = ys_ref - y_ref_ws + + residuals_mu = np.mean(residuals) + residuals_sd = np.std(residuals) + + # Adjust the mean with the mean of the residuals + if ys is None: + # make and adjust predictions + ys_out[idx_s], s2_out[idx_s] = self.predict(hyp, None, None, Xs[idx_s,:]) + ys_out[idx_s] = ys_out[idx_s] - residuals_mu + + # Set the deviation to the devations of the residuals + s2_out[idx_s] = np.ones(len(s2_out[idx_s]))*residuals_sd**2 + else: + # adjust the data + if self.warp is not None: + y_ws = self.warp.f(ys[idx_s], hyp[1:self.warp.get_n_params()+1]) + ys_out[idx_s] = y_ws + residuals_mu + ys_out[idx_s] = self.warp.invf(ys_out[idx_s], hyp[1:self.warp.get_n_params()+1]) + else: + ys = ys - residuals_mu + s2_out = None + + return ys_out, s2_out
- return ys, s2
-
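Putting the pieces of this hunk together, a typical workflow might look like the sketch below. It is illustrative only: the toy data are invented and the import path is an assumption rather than something stated in the diff.

```python
import numpy as np
from pcntoolkit.model.bayesreg import BLR   # assumed import path

# toy data: N samples, D covariates (illustrative only)
N, D = 200, 2
X = np.random.randn(N, D)
y = X.dot(np.array([0.5, -1.0])) + 0.1 * np.random.randn(N)

# homoskedastic noise: hyp = (log(beta), log(alpha)) as in the class docstring
B = BLR()
hyp = B.estimate(np.zeros(2), X, y)
ys, s2 = B.predict(hyp, X, y, X)                      # predictive mean and variance

# heteroskedastic noise: log noise precision modelled as a linear function of Xv
Xv = np.abs(X[:, 0:1])
Bh = BLR()
hyp0 = np.zeros(Xv.shape[1] + 1)                      # noise weights, then log(alpha)
hyp_h = Bh.estimate(hyp0, X, y, var_covariates=Xv, optimizer='powell')
ys_h, s2_h = Bh.predict(hyp_h, X, y, X, var_covariates_test=Xv)
```

The `predict_and_adjust` method follows the same pattern, but additionally takes adaptation data and the `var_groups_adapt` / `var_groups_test` identifiers so that predictions can be recalibrated for a new site.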
\ No newline at end of file diff --git a/doc/build/html/_modules/fileio.html b/doc/build/html/_modules/fileio.html index 4718eed8..482658f5 100644 --- a/doc/build/html/_modules/fileio.html +++ b/doc/build/html/_modules/fileio.html @@ -1,493 +1,639 @@ - fileio — Predictive Clinical Neuroscience Toolkit 0.17 documentation + fileio — Predictive Clinical Neuroscience Toolkit 0.20 documentation

Source code for fileio

-from __future__ import print_function
-
-import os
-import sys
-import numpy as np
-import nibabel as nib
-import tempfile
-import pandas as pd
-import re
-
-try:  # run as a package if installed
-    from pcntoolkit import configs
-except ImportError:
-    pass
-
-    path = os.path.abspath(os.path.dirname(__file__))
-    if path not in sys.path:
-        sys.path.append(path)
-    del path
-    import configs
-
-CIFTI_MAPPINGS = ('dconn', 'dtseries', 'pconn', 'ptseries', 'dscalar',
-                  'dlabel', 'pscalar', 'pdconn', 'dpconn',
-                  'pconnseries', 'pconnscalar')
-
-CIFTI_VOL_ATLAS = 'Atlas_ROIs.2.nii.gz'
-
-PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL
-
-# ------------------------
-# general utility routines
-# ------------------------
-
-
[docs]def predictive_interval(s2_forward, - cov_forward, - multiplicator): - # calculates a predictive interval +from __future__ import print_function + +import os +import sys +import numpy as np +import nibabel as nib +import tempfile +import pandas as pd +import re + +try: # run as a package if installed + from pcntoolkit import configs +except ImportError: + pass + + path = os.path.abspath(os.path.dirname(__file__)) + path = os.path.dirname(path) # parent directory + if path not in sys.path: + sys.path.append(path) + del path + import configs + +CIFTI_MAPPINGS = ('dconn', 'dtseries', 'pconn', 'ptseries', 'dscalar', + 'dlabel', 'pscalar', 'pdconn', 'dpconn', + 'pconnseries', 'pconnscalar') + +CIFTI_VOL_ATLAS = 'Atlas_ROIs.2.nii.gz' + +PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL + +# ------------------------ +# general utility routines +# ------------------------ + +
[docs]def predictive_interval(s2_forward, + cov_forward, + multiplicator): + # calculates a predictive interval - PI=np.zeros(len(cov_forward)) - for i,xdot in enumerate(cov_forward): - s=np.sqrt(s2_forward[i]) - PI[i]=multiplicator*s - return PI
- -
[docs]def create_mask(data_array, mask, verbose=False): - # create a (volumetric) mask either from an input nifti or the nifti itself - - if mask is not None: - if verbose: - print('Loading ROI mask ...') - maskvol = load_nifti(mask, vol=True) - maskvol = maskvol != 0 - else: - if len(data_array.shape) < 4: - dim = data_array.shape[0:3] + (1,) - else: - dim = data_array.shape[0:3] + (data_array.shape[3],) - - if verbose: - print('Generating mask automatically ...') - if dim[3] == 1: - maskvol = data_array[:, :, :] != 0 - else: - maskvol = data_array[:, :, :, 0] != 0 - - return maskvol
- - -
[docs]def vol2vec(dat, mask, verbose=False): - # vectorise a 3d image - - if len(dat.shape) < 4: - dim = dat.shape[0:3] + (1,) - else: - dim = dat.shape[0:3] + (dat.shape[3],) + PI=np.zeros(len(cov_forward)) + for i,xdot in enumerate(cov_forward): + s=np.sqrt(s2_forward[i]) + PI[i]=multiplicator*s + return PI
+ +
[docs]def create_mask(data_array, mask, verbose=False): + # create a (volumetric) mask either from an input nifti or the nifti itself + + if mask is not None: + if verbose: + print('Loading ROI mask ...') + maskvol = load_nifti(mask, vol=True) + maskvol = maskvol != 0 + else: + if len(data_array.shape) < 4: + dim = data_array.shape[0:3] + (1,) + else: + dim = data_array.shape[0:3] + (data_array.shape[3],) + + if verbose: + print('Generating mask automatically ...') + if dim[3] == 1: + maskvol = data_array[:, :, :] != 0 + else: + maskvol = data_array[:, :, :, 0] != 0 + + return maskvol
+ + +
[docs]def vol2vec(dat, mask, verbose=False): + # vectorise a 3d image + + if len(dat.shape) < 4: + dim = dat.shape[0:3] + (1,) + else: + dim = dat.shape[0:3] + (dat.shape[3],) - #mask = create_mask(dat, mask=mask, verbose=verbose) - if mask is None: - mask = create_mask(dat, mask=mask, verbose=verbose) + #mask = create_mask(dat, mask=mask, verbose=verbose) + if mask is None: + mask = create_mask(dat, mask=mask, verbose=verbose) - # mask the image - maskid = np.where(mask.ravel())[0] - dat = np.reshape(dat, (np.prod(dim[0:3]), dim[3])) - dat = dat[maskid, :] + # mask the image + maskid = np.where(mask.ravel())[0] + dat = np.reshape(dat, (np.prod(dim[0:3]), dim[3])) + dat = dat[maskid, :] - # convert to 1-d array if the file only contains one volume - if dim[3] == 1: - dat = dat.ravel() + # convert to 1-d array if the file only contains one volume + if dim[3] == 1: + dat = dat.ravel() - return dat
+ return dat
-
[docs]def file_type(filename): - # routine to determine filetype +
[docs]def file_type(filename): + # routine to determine filetype - if filename.endswith(('.dtseries.nii', '.dscalar.nii', '.dlabel.nii')): - ftype = 'cifti' - elif filename.endswith(('.nii.gz', '.nii', '.img', '.hdr')): - ftype = 'nifti' - elif filename.endswith(('.txt', '.csv', '.tsv', '.asc')): - ftype = 'text' - elif filename.endswith(('.pkl')): - ftype = 'binary' - else: - raise ValueError("I don't know what to do with " + filename) + if filename.endswith(('.dtseries.nii', '.dscalar.nii', '.dlabel.nii')): + ftype = 'cifti' + elif filename.endswith(('.nii.gz', '.nii', '.img', '.hdr')): + ftype = 'nifti' + elif filename.endswith(('.txt', '.csv', '.tsv', '.asc')): + ftype = 'text' + elif filename.endswith(('.pkl')): + ftype = 'binary' + else: + raise ValueError("I don't know what to do with " + filename) - return ftype
+ return ftype
-
[docs]def file_extension(filename): - # routine to get the full file extension (e.g. .nii.gz, not just .gz) +
[docs]def file_extension(filename): + # routine to get the full file extension (e.g. .nii.gz, not just .gz) - parts = filename.split(os.extsep) + parts = filename.split(os.extsep) - if parts[-1] == 'gz': - if parts[-2] == 'nii' or parts[-2] == 'img' or parts[-2] == 'hdr': - ext = parts[-2] + '.' + parts[-1] - else: - ext = parts[-1] - elif parts[-1] == 'nii': - if parts[-2] in CIFTI_MAPPINGS: - ext = parts[-2] + '.' + parts[-1] - else: - ext = parts[-1] - else: - ext = parts[-1] - - ext = '.' + ext - return ext
- - -
[docs]def file_stem(filename): - - idx = filename.find(file_extension(filename)) - stm = filename[0:idx] - - return stm
- -# -------------- -# nifti routines -# -------------- - - -
[docs]def load_nifti(datafile, mask=None, vol=False, verbose=False): - - if verbose: - print('Loading nifti: ' + datafile + ' ...') - img = nib.load(datafile) - dat = img.get_data() - - if mask is not None: - mask=load_nifti(mask, vol=True) - - if not vol: - dat = vol2vec(dat, mask) - - return dat
- - -
[docs]def save_nifti(data, filename, examplenii, mask): - """ Write output to nifti """ - - # load mask - if isinstance(mask, str): - mask = load_nifti(mask, vol=True) - mask = mask != 0 - - # load example image - ex_img = nib.load(examplenii) - ex_img.shape - dim = ex_img.shape[0:3] - if len(data.shape) < 2: - nvol = 1 - data = data[:, np.newaxis] - else: - nvol = int(data.shape[1]) - - # write data - array_data = np.zeros((np.prod(dim), nvol)) - array_data[mask.flatten(), :] = data - array_data = np.reshape(array_data, dim+(nvol,)) - array_img = nib.Nifti1Image(array_data, ex_img.affine, ex_img.header) - nib.save(array_img, filename)
- -# -------------- -# cifti routines -# -------------- - - -
[docs]def load_cifti(filename, vol=False, mask=None, rmtmp=True): - - # parse the name - dnam, fnam = os.path.split(filename) - fpref = file_stem(fnam) - outstem = os.path.join(tempfile.gettempdir(), - str(os.getpid()) + "-" + fpref) - - # extract surface data from the cifti file - print("Extracting cifti surface data to ", outstem, '-*.func.gii', sep="") - giinamel = outstem + '-left.func.gii' - giinamer = outstem + '-right.func.gii' - os.system('wb_command -cifti-separate ' + filename + - ' COLUMN -metric CORTEX_LEFT ' + giinamel) - os.system('wb_command -cifti-separate ' + filename + - ' COLUMN -metric CORTEX_RIGHT ' + giinamer) - - # load the surface data - giil = nib.load(giinamel) - giir = nib.load(giinamer) - Nimg = len(giil.darrays) - Nvert = len(giil.darrays[0].data) - if Nimg == 1: - out = np.concatenate((giil.darrays[0].data, giir.darrays[0].data), - axis=0) - else: - Gl = np.zeros((Nvert, Nimg)) - Gr = np.zeros((Nvert, Nimg)) - for i in range(0, Nimg): - Gl[:, i] = giil.darrays[i].data - Gr[:, i] = giir.darrays[i].data - out = np.concatenate((Gl, Gr), axis=0) - if rmtmp: - # clean up temporary files - os.remove(giinamel) - os.remove(giinamer) - - if vol: - niiname = outstem + '-vol.nii' - print("Extracting cifti volume data to ", niiname, sep="") - os.system('wb_command -cifti-separate ' + filename + - ' COLUMN -volume-all ' + niiname) - vol = load_nifti(niiname, vol=True) - volmask = create_mask(vol) - out = np.concatenate((out, vol2vec(vol, volmask)), axis=0) - if rmtmp: - os.remove(niiname) - - return out
- - -
[docs]def save_cifti(data, filename, example, mask=None, vol=True, volatlas=None): - """ Write output to nifti """ - - # do some sanity checks - if data.dtype == 'float32' or \ - data.dtype == 'float' or \ - data.dtype == 'float64': - data = data.astype('float32') # force 32 bit output - dtype = 'NIFTI_TYPE_FLOAT32' - else: - raise(ValueError, 'Only float data types currently handled') - - if len(data.shape) == 1: - Nimg = 1 - data = data[:, np.newaxis] - else: - Nimg = data.shape[1] - - # get the base filename - dnam, fnam = os.path.split(filename) - fstem = file_stem(fnam) - - # Split the template - estem = os.path.join(tempfile.gettempdir(), str(os.getpid()) + "-" + fstem) - giiexnamel = estem + '-left.func.gii' - giiexnamer = estem + '-right.func.gii' - os.system('wb_command -cifti-separate ' + example + - ' COLUMN -metric CORTEX_LEFT ' + giiexnamel) - os.system('wb_command -cifti-separate ' + example + - ' COLUMN -metric CORTEX_RIGHT ' + giiexnamer) - - # write left hemisphere - giiexl = nib.load(giiexnamel) - Nvertl = len(giiexl.darrays[0].data) - garraysl = [] - for i in range(0, Nimg): - garraysl.append( - nib.gifti.gifti.GiftiDataArray(data=data[0:Nvertl, i], - datatype=dtype)) - giil = nib.gifti.gifti.GiftiImage(darrays=garraysl) - fnamel = fstem + '-left.func.gii' - nib.save(giil, fnamel) - - # write right hemisphere - giiexr = nib.load(giiexnamer) - Nvertr = len(giiexr.darrays[0].data) - garraysr = [] - for i in range(0, Nimg): - garraysr.append( - nib.gifti.gifti.GiftiDataArray(data=data[Nvertl:Nvertl+Nvertr, i], - datatype=dtype)) - giir = nib.gifti.gifti.GiftiImage(darrays=garraysr) - fnamer = fstem + '-right.func.gii' - nib.save(giir, fnamer) - - tmpfiles = [fnamer, fnamel, giiexnamel, giiexnamer] - - # process volumetric data - if vol: - niiexname = estem + '-vol.nii' - os.system('wb_command -cifti-separate ' + example + - ' COLUMN -volume-all ' + niiexname) - niivol = load_nifti(niiexname, vol=True) - if mask is None: - mask = create_mask(niivol) - - if volatlas is None: - volatlas = CIFTI_VOL_ATLAS - fnamev = fstem + '-vol.nii' - - save_nifti(data[Nvertr+Nvertl:, :], fnamev, niiexname, mask) - tmpfiles.extend([fnamev, niiexname]) - - # write cifti - fname = fstem + '.dtseries.nii' - os.system('wb_command -cifti-create-dense-timeseries ' + fname + - ' -volume ' + fnamev + ' ' + volatlas + - ' -left-metric ' + fnamel + ' -right-metric ' + fnamer) - - # clean up - for f in tmpfiles: - os.remove(f)
- -# -------------- -# ascii routines -# -------------- - - -
[docs]def load_pd(filename): - # based on pandas - x = pd.read_csv(filename, - sep=' ', - header=None) - return x
- - -
[docs]def save_pd(data, filename): - # based on pandas - data.to_csv(filename, - index=None, - header=None, - sep=' ', - na_rep='NaN')
- - -
[docs]def load_ascii(filename): - # based on pandas - x = np.loadtxt(filename) - return x
- - -
[docs]def save_ascii(data, filename): - # based on pandas - np.savetxt(filename, data)
- -# ---------------- -# generic routines -# ---------------- - - -
[docs]def save(data, filename, example=None, mask=None, text=False): - - if file_type(filename) == 'cifti': - save_cifti(data.T, filename, example, vol=True) - elif file_type(filename) == 'nifti': - save_nifti(data.T, filename, example, mask) - elif text or file_type(filename) == 'text': - save_ascii(data, filename) - elif file_type(filename) == 'binary': - data = pd.DataFrame(data) - data.to_pickle(filename, protocol=PICKLE_PROTOCOL)
- - -
[docs]def load(filename, mask=None, text=False, vol=True): - - if file_type(filename) == 'cifti': - x = load_cifti(filename, vol=vol) - elif file_type(filename) == 'nifti': - x = load_nifti(filename, mask) - elif text or file_type(filename) == 'text': - x = load_ascii(filename) - elif file_type(filename) == 'binary': - x = pd.read_pickle(filename) - x = x.to_numpy() + if parts[-1] == 'gz': + if parts[-2] == 'nii' or parts[-2] == 'img' or parts[-2] == 'hdr': + ext = parts[-2] + '.' + parts[-1] + else: + ext = parts[-1] + elif parts[-1] == 'nii': + if parts[-2] in CIFTI_MAPPINGS: + ext = parts[-2] + '.' + parts[-1] + else: + ext = parts[-1] + else: + ext = parts[-1] + + ext = '.' + ext + return ext
+ + +
[docs]def file_stem(filename): + + idx = filename.find(file_extension(filename)) + stm = filename[0:idx] + + return stm
+ +# -------------- +# nifti routines +# -------------- + + +
[docs]def load_nifti(datafile, mask=None, vol=False, verbose=False): + + if verbose: + print('Loading nifti: ' + datafile + ' ...') + img = nib.load(datafile) + dat = img.get_data() + + if mask is not None: + mask=load_nifti(mask, vol=True) + + if not vol: + dat = vol2vec(dat, mask) + + return dat
+ + +
[docs]def save_nifti(data, filename, examplenii, mask): + """ Write output to nifti """ + + # load mask + if isinstance(mask, str): + mask = load_nifti(mask, vol=True) + mask = mask != 0 + + # load example image + ex_img = nib.load(examplenii) + ex_img.shape + dim = ex_img.shape[0:3] + if len(data.shape) < 2: + nvol = 1 + data = data[:, np.newaxis] + else: + nvol = int(data.shape[1]) + + # write data + array_data = np.zeros((np.prod(dim), nvol)) + array_data[mask.flatten(), :] = data + array_data = np.reshape(array_data, dim+(nvol,)) + array_img = nib.Nifti1Image(array_data, ex_img.affine, ex_img.header) + nib.save(array_img, filename)
+ +# -------------- +# cifti routines +# -------------- + + +
[docs]def load_cifti(filename, vol=False, mask=None, rmtmp=True): + + # parse the name + dnam, fnam = os.path.split(filename) + fpref = file_stem(fnam) + outstem = os.path.join(tempfile.gettempdir(), + str(os.getpid()) + "-" + fpref) + + # extract surface data from the cifti file + print("Extracting cifti surface data to ", outstem, '-*.func.gii', sep="") + giinamel = outstem + '-left.func.gii' + giinamer = outstem + '-right.func.gii' + os.system('wb_command -cifti-separate ' + filename + + ' COLUMN -metric CORTEX_LEFT ' + giinamel) + os.system('wb_command -cifti-separate ' + filename + + ' COLUMN -metric CORTEX_RIGHT ' + giinamer) + + # load the surface data + giil = nib.load(giinamel) + giir = nib.load(giinamer) + Nimg = len(giil.darrays) + Nvert = len(giil.darrays[0].data) + if Nimg == 1: + out = np.concatenate((giil.darrays[0].data, giir.darrays[0].data), + axis=0) + else: + Gl = np.zeros((Nvert, Nimg)) + Gr = np.zeros((Nvert, Nimg)) + for i in range(0, Nimg): + Gl[:, i] = giil.darrays[i].data + Gr[:, i] = giir.darrays[i].data + out = np.concatenate((Gl, Gr), axis=0) + if rmtmp: + # clean up temporary files + os.remove(giinamel) + os.remove(giinamer) + + if vol: + niiname = outstem + '-vol.nii' + print("Extracting cifti volume data to ", niiname, sep="") + os.system('wb_command -cifti-separate ' + filename + + ' COLUMN -volume-all ' + niiname) + vol = load_nifti(niiname, vol=True) + volmask = create_mask(vol) + out = np.concatenate((out, vol2vec(vol, volmask)), axis=0) + if rmtmp: + os.remove(niiname) + + return out
+ + +
[docs]def save_cifti(data, filename, example, mask=None, vol=True, volatlas=None): + """ Write output to nifti """ + + # do some sanity checks + if data.dtype == 'float32' or \ + data.dtype == 'float' or \ + data.dtype == 'float64': + data = data.astype('float32') # force 32 bit output + dtype = 'NIFTI_TYPE_FLOAT32' + else: + raise(ValueError, 'Only float data types currently handled') + + if len(data.shape) == 1: + Nimg = 1 + data = data[:, np.newaxis] + else: + Nimg = data.shape[1] + + # get the base filename + dnam, fnam = os.path.split(filename) + fstem = file_stem(fnam) + + # Split the template + estem = os.path.join(tempfile.gettempdir(), str(os.getpid()) + "-" + fstem) + giiexnamel = estem + '-left.func.gii' + giiexnamer = estem + '-right.func.gii' + os.system('wb_command -cifti-separate ' + example + + ' COLUMN -metric CORTEX_LEFT ' + giiexnamel) + os.system('wb_command -cifti-separate ' + example + + ' COLUMN -metric CORTEX_RIGHT ' + giiexnamer) + + # write left hemisphere + giiexl = nib.load(giiexnamel) + Nvertl = len(giiexl.darrays[0].data) + garraysl = [] + for i in range(0, Nimg): + garraysl.append( + nib.gifti.gifti.GiftiDataArray(data=data[0:Nvertl, i], + datatype=dtype)) + giil = nib.gifti.gifti.GiftiImage(darrays=garraysl) + fnamel = fstem + '-left.func.gii' + nib.save(giil, fnamel) + + # write right hemisphere + giiexr = nib.load(giiexnamer) + Nvertr = len(giiexr.darrays[0].data) + garraysr = [] + for i in range(0, Nimg): + garraysr.append( + nib.gifti.gifti.GiftiDataArray(data=data[Nvertl:Nvertl+Nvertr, i], + datatype=dtype)) + giir = nib.gifti.gifti.GiftiImage(darrays=garraysr) + fnamer = fstem + '-right.func.gii' + nib.save(giir, fnamer) + + tmpfiles = [fnamer, fnamel, giiexnamel, giiexnamer] + + # process volumetric data + if vol: + niiexname = estem + '-vol.nii' + os.system('wb_command -cifti-separate ' + example + + ' COLUMN -volume-all ' + niiexname) + niivol = load_nifti(niiexname, vol=True) + if mask is None: + mask = create_mask(niivol) + + if volatlas is None: + volatlas = CIFTI_VOL_ATLAS + fnamev = fstem + '-vol.nii' + + save_nifti(data[Nvertr+Nvertl:, :], fnamev, niiexname, mask) + tmpfiles.extend([fnamev, niiexname]) + + # write cifti + fname = fstem + '.dtseries.nii' + os.system('wb_command -cifti-create-dense-timeseries ' + fname + + ' -volume ' + fnamev + ' ' + volatlas + + ' -left-metric ' + fnamel + ' -right-metric ' + fnamer) + + # clean up + for f in tmpfiles: + os.remove(f)
+ +# -------------- +# ascii routines +# -------------- + + +
[docs]def load_pd(filename): + # based on pandas + x = pd.read_csv(filename, + sep=' ', + header=None) + return x
+ + +
[docs]def save_pd(data, filename): + # based on pandas + data.to_csv(filename, + index=None, + header=None, + sep=' ', + na_rep='NaN')
+ + +
[docs]def load_ascii(filename): + # based on pandas + x = np.loadtxt(filename) + return x
+ + +
[docs]def save_ascii(data, filename): + # based on pandas + np.savetxt(filename, data)
+ +# ---------------- +# generic routines +# ---------------- + + +
[docs]def save(data, filename, example=None, mask=None, text=False): + + if file_type(filename) == 'cifti': + save_cifti(data.T, filename, example, vol=True) + elif file_type(filename) == 'nifti': + save_nifti(data.T, filename, example, mask) + elif text or file_type(filename) == 'text': + save_ascii(data, filename) + elif file_type(filename) == 'binary': + data = pd.DataFrame(data) + data.to_pickle(filename, protocol=PICKLE_PROTOCOL)
+ + +
[docs]def load(filename, mask=None, text=False, vol=True): + + if file_type(filename) == 'cifti': + x = load_cifti(filename, vol=vol) + elif file_type(filename) == 'nifti': + x = load_nifti(filename, mask) + elif text or file_type(filename) == 'text': + x = load_ascii(filename) + elif file_type(filename) == 'binary': + x = pd.read_pickle(filename) + x = x.to_numpy() - return x
+ return x
-# ------------------- -# sorting routines for batched in normative parallel -# ------------------- +# ------------------- +# sorting routines for batched in normative parallel +# ------------------- -
[docs]def tryint(s): - try: - return int(s) - except ValueError: - return s
+
[docs]def tryint(s): + try: + return int(s) + except ValueError: + return s
-
[docs]def alphanum_key(s): - return [tryint(c) for c in re.split('([0-9]+)', s)]
+
[docs]def alphanum_key(s): + return [tryint(c) for c in re.split('([0-9]+)', s)]
-
[docs]def sort_nicely(l): - return sorted(l, key=alphanum_key)
+
[docs]def sort_nicely(l): + return sorted(l, key=alphanum_key)
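A one-line illustration of why this natural (alphanumeric) sort is used when collecting batch outputs, assuming the three helpers defined above are in scope and the directory names are hypothetical::

    print(sort_nicely(['batch_10', 'batch_2', 'batch_1']))
    # ['batch_1', 'batch_2', 'batch_10']   (plain sorted() would give ['batch_1', 'batch_10', 'batch_2'])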
-
+
+
+
+ +
+ +
+

- -
-
- - - + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_modules/gp.html b/doc/build/html/_modules/gp.html index 6f587d88..01933c67 100644 --- a/doc/build/html/_modules/gp.html +++ b/doc/build/html/_modules/gp.html @@ -1,575 +1,721 @@ - - - - - - - gp — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + gp — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -
-
-
-
+ + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+

Source code for gp

-from __future__ import print_function
-from __future__ import division
-
-import os
-import sys
-import numpy as np
-from scipy import optimize
-from numpy.linalg import solve, LinAlgError
-from numpy.linalg import cholesky as chol
-from six import with_metaclass
-from abc import ABCMeta, abstractmethod
-
-
-try:  # Run as a package if installed    
-    from pcntoolkit.utils import squared_dist
-except ImportError:
-    pass
-
-    path = os.path.abspath(os.path.dirname(__file__))
-    if path not in sys.path:
-        sys.path.append(path)
-    del path
+from __future__ import print_function
+from __future__ import division
+
+import os
+import sys
+import numpy as np
+from scipy import optimize
+from numpy.linalg import solve, LinAlgError
+from numpy.linalg import cholesky as chol
+from six import with_metaclass
+from abc import ABCMeta, abstractmethod
+
+
+try:  # Run as a package if installed    
+    from pcntoolkit.utils import squared_dist
+except ImportError:
+    pass
+
+    path = os.path.abspath(os.path.dirname(__file__))
+    path = os.path.dirname(path) # parent directory 
+    if path not in sys.path:
+        sys.path.append(path)
+    del path
     
-    from utils import squared_dist
+    from util.utils import squared_dist
 
-# --------------------
-# Covariance functions
-# --------------------
+# --------------------
+# Covariance functions
+# --------------------
 
 
-
[docs]class CovBase(with_metaclass(ABCMeta)): - """ Base class for covariance functions. +
[docs]class CovBase(with_metaclass(ABCMeta)): + """ Base class for covariance functions. - All covariance functions must define the following methods:: + All covariance functions must define the following methods:: - CovFunction.get_n_params() - CovFunction.cov() - CovFunction.xcov() - CovFunction.dcov() - """ + CovFunction.get_n_params() + CovFunction.cov() + CovFunction.xcov() + CovFunction.dcov() + """ - def __init__(self, x=None): - self.n_params = np.nan + def __init__(self, x=None): + self.n_params = np.nan -
[docs] def get_n_params(self): - """ Report the number of parameters required """ +
[docs] def get_n_params(self): + """ Report the number of parameters required """ - assert not np.isnan(self.n_params), \ - "Covariance function not initialised" + assert not np.isnan(self.n_params), \ + "Covariance function not initialised" - return self.n_params
+ return self.n_params
-
[docs] @abstractmethod - def cov(self, theta, x, z=None): - """ Return the full covariance (or cross-covariance if z is given) """
+
[docs] @abstractmethod + def cov(self, theta, x, z=None): + """ Return the full covariance (or cross-covariance if z is given) """
-
[docs] @abstractmethod - def dcov(self, theta, x, i): - """ Return the derivative of the covariance function with respect to - the i-th hyperparameter """
+
[docs] @abstractmethod + def dcov(self, theta, x, i): + """ Return the derivative of the covariance function with respect to + the i-th hyperparameter """
-
[docs]class CovLin(CovBase): - """ Linear covariance function (no hyperparameters) - """ +
[docs]class CovLin(CovBase): + """ Linear covariance function (no hyperparameters) + """ - def __init__(self, x=None): - self.n_params = 0 - self.first_call = False + def __init__(self, x=None): + self.n_params = 0 + self.first_call = False -
[docs] def cov(self, theta, x, z=None): - if not self.first_call and not theta and theta is not None: - self.first_call = True - if len(theta) > 0 and theta[0] is not None: - print("CovLin: ignoring unnecessary hyperparameter ...") +
[docs] def cov(self, theta, x, z=None): + if not self.first_call and not theta and theta is not None: + self.first_call = True + if len(theta) > 0 and theta[0] is not None: + print("CovLin: ignoring unnecessary hyperparameter ...") - if z is None: - z = x + if z is None: + z = x - K = x.dot(z.T) - return K
+ K = x.dot(z.T) + return K
-
[docs] def dcov(self, theta, x, i): - raise ValueError("Invalid covariance function parameter")
+
[docs] def dcov(self, theta, x, i): + raise ValueError("Invalid covariance function parameter")
-
[docs]class CovSqExp(CovBase): - """ Ordinary squared exponential covariance function. - The hyperparameters are:: +
[docs]class CovSqExp(CovBase): + """ Ordinary squared exponential covariance function. + The hyperparameters are:: - theta = ( log(ell), log(sf) ) + theta = ( log(ell), log(sf) ) - where ell is a lengthscale parameter and sf2 is the signal variance - """ + where ell is a lengthscale parameter and sf2 is the signal variance + """ - def __init__(self, x=None): - self.n_params = 2 + def __init__(self, x=None): + self.n_params = 2 -
[docs] def cov(self, theta, x, z=None): - self.ell = np.exp(theta[0]) - self.sf2 = np.exp(2*theta[1]) +
[docs] def cov(self, theta, x, z=None): + self.ell = np.exp(theta[0]) + self.sf2 = np.exp(2*theta[1]) - if z is None: - z = x + if z is None: + z = x - R = squared_dist(x/self.ell, z/self.ell) - K = self.sf2 * np.exp(-R/2) - return K
+ R = squared_dist(x/self.ell, z/self.ell) + K = self.sf2 * np.exp(-R/2) + return K
-
[docs] def dcov(self, theta, x, i): - self.ell = np.exp(theta[0]) - self.sf2 = np.exp(2*theta[1]) +
[docs] def dcov(self, theta, x, i): + self.ell = np.exp(theta[0]) + self.sf2 = np.exp(2*theta[1]) - R = squared_dist(x/self.ell, x/self.ell) + R = squared_dist(x/self.ell, x/self.ell) - if i == 0: # return derivative of lengthscale parameter - dK = self.sf2 * np.exp(-R/2) * R - return dK - elif i == 1: # return derivative of signal variance parameter - dK = 2*self.sf2 * np.exp(-R/2) - return dK - else: - raise ValueError("Invalid covariance function parameter")
+ if i == 0: # return derivative of lengthscale parameter + dK = self.sf2 * np.exp(-R/2) * R + return dK + elif i == 1: # return derivative of signal variance parameter + dK = 2*self.sf2 * np.exp(-R/2) + return dK + else: + raise ValueError("Invalid covariance function parameter")
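A stand-alone numerical sketch of the squared-exponential kernel parameterised as above, theta = (log(ell), log(sf)). SciPy's cdist stands in for the toolkit's squared_dist helper so the snippet runs on its own, and the hyperparameter values are arbitrary::

    import numpy as np
    from scipy.spatial.distance import cdist

    def sqexp_cov(theta, x, z=None):
        # mirrors CovSqExp.cov: ell = exp(theta[0]), sf2 = exp(2 * theta[1])
        ell, sf2 = np.exp(theta[0]), np.exp(2 * theta[1])
        z = x if z is None else z
        R = cdist(x / ell, z / ell, metric='sqeuclidean')
        return sf2 * np.exp(-R / 2)

    x = np.linspace(0, 1, 5)[:, None]
    K = sqexp_cov([np.log(0.5), np.log(1.0)], x)        # ell = 0.5, sf = 1
    print(K.shape, np.allclose(K, K.T), K[0, 0])        # (5, 5) True 1.0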
-
[docs]class CovSqExpARD(CovBase): - """ Squared exponential covariance function with ARD - The hyperparameters are:: - - theta = (log(ell_1, ..., log_ell_D), log(sf)) - - where ell_i are lengthscale parameters and sf2 is the signal variance - """ - - def __init__(self, x=None): - if x is None: - raise ValueError("N x D data matrix must be supplied as input") - if len(x.shape) == 1: - self.D = 1 - else: - self.D = x.shape[1] - self.n_params = self.D + 1 +
[docs]class CovSqExpARD(CovBase): + """ Squared exponential covariance function with ARD + The hyperparameters are:: + + theta = (log(ell_1, ..., log_ell_D), log(sf)) + + where ell_i are lengthscale parameters and sf2 is the signal variance + """ + + def __init__(self, x=None): + if x is None: + raise ValueError("N x D data matrix must be supplied as input") + if len(x.shape) == 1: + self.D = 1 + else: + self.D = x.shape[1] + self.n_params = self.D + 1 -
[docs] def cov(self, theta, x, z=None): - self.ell = np.exp(theta[0:self.D]) - self.sf2 = np.exp(2*theta[self.D]) +
[docs] def cov(self, theta, x, z=None): + self.ell = np.exp(theta[0:self.D]) + self.sf2 = np.exp(2*theta[self.D]) - if z is None: - z = x + if z is None: + z = x - R = squared_dist(x.dot(np.diag(1./self.ell)), - z.dot(np.diag(1./self.ell))) - K = self.sf2*np.exp(-R/2) - return K
- -
[docs] def dcov(self, theta, x, i): - K = self.cov(theta, x) - if i < self.D: # return derivative of lengthscale parameter - dK = K * squared_dist(x[:, i]/self.ell[i], x[:, i]/self.ell[i]) - return dK - elif i == self.D: # return derivative of signal variance parameter - dK = 2*K - return dK - else: - raise ValueError("Invalid covariance function parameter")
- - -
[docs]class CovSum(CovBase): - """ Sum of covariance functions. These are passed in as a cell array and - intialised automatically. For example:: - - C = CovSum(x,(CovLin, CovSqExpARD)) - C = CovSum.cov(x, ) - - The hyperparameters are:: + R = squared_dist(x.dot(np.diag(1./self.ell)), + z.dot(np.diag(1./self.ell))) + K = self.sf2*np.exp(-R/2) + return K
+ +
[docs] def dcov(self, theta, x, i): + K = self.cov(theta, x) + if i < self.D: # return derivative of lengthscale parameter + dK = K * squared_dist(x[:, i]/self.ell[i], x[:, i]/self.ell[i]) + return dK + elif i == self.D: # return derivative of signal variance parameter + dK = 2*K + return dK + else: + raise ValueError("Invalid covariance function parameter")
+ + +
[docs]class CovSum(CovBase): + """ Sum of covariance functions. These are passed in as a cell array and + intialised automatically. For example:: + + C = CovSum(x,(CovLin, CovSqExpARD)) + C = CovSum.cov(x, ) + + The hyperparameters are:: - theta = ( log(ell_1, ..., log_ell_D), log(sf2) ) + theta = ( log(ell_1, ..., log_ell_D), log(sf2) ) - where ell_i are lengthscale parameters and sf2 is the signal variance - """ - - def __init__(self, x=None, covfuncnames=None): - if x is None: - raise ValueError("N x D data matrix must be supplied as input") - if covfuncnames is None: - raise ValueError("A list of covariance functions is required") - self.covfuncs = [] - self.n_params = 0 - for cname in covfuncnames: - covfunc = eval(cname + '(x)') - self.n_params += covfunc.get_n_params() - self.covfuncs.append(covfunc) - - if len(x.shape) == 1: - self.N = len(x) - self.D = 1 - else: - self.N, self.D = x.shape - -
[docs] def cov(self, theta, x, z=None): - theta_offset = 0 - for ci, covfunc in enumerate(self.covfuncs): - try: - n_params_c = covfunc.get_n_params() - theta_c = [theta[c] for c in - range(theta_offset, theta_offset + n_params_c)] - theta_offset += n_params_c - except Exception as e: - print(e) - - if ci == 0: - K = covfunc.cov(theta_c, x, z) - else: - K += covfunc.cov(theta_c, x, z) - return K
- -
[docs] def dcov(self, theta, x, i): - theta_offset = 0 - for covfunc in self.covfuncs: - n_params_c = covfunc.get_n_params() - theta_c = [theta[c] for c in - range(theta_offset, theta_offset + n_params_c)] - theta_offset += n_params_c - - if theta_c: # does the variable have any hyperparameters? - if 'dK' not in locals(): - dK = covfunc.dcov(theta_c, x, i) - else: - dK += covfunc.dcov(theta_c, x, i) - return dK
- -# ----------------------- -# Gaussian process models -# ----------------------- - - -
[docs]class GPR: - """Gaussian process regression - - Estimation and prediction of Gaussian process regression models - - Basic usage:: - - G = GPR() - hyp = B.estimate(hyp0, cov, X, y) - ys, ys2 = B.predict(hyp, cov, X, y, Xs) - - where the variables are - - :param hyp: vector of hyperparmaters - :param cov: covariance function - :param X: N x D data array - :param y: 1D Array of targets (length N) - :param Xs: Nte x D array of test cases - :param hyp0: starting estimates for hyperparameter optimisation - - :returns: * ys - predictive mean - * ys2 - predictive variance - - The hyperparameters are:: - - hyp = ( log(sn), (cov function params) ) # hyp is a list or array - - The implementation and notation follows Rasmussen and Williams (2006). - As in the gpml toolbox, these parameters are estimated using conjugate - gradient optimisation of the marginal likelihood. Note that there is no - explicit mean function, thus the gpr routines are limited to modelling - zero-mean processes. - - Reference: - C. Rasmussen and C. Williams (2006) Gaussian Processes for Machine Learning - - Written by A. Marquand - """ - - def __init__(self, hyp=None, covfunc=None, X=None, y=None, n_iter=100, - tol=1e-3, verbose=False, warp=None): - - self.hyp = np.nan - self.nlZ = np.nan - self.tol = tol # not used at present - self.n_iter = n_iter - self.verbose = verbose - - # set up warped likelihood - if warp is None: - self.warp = None - self.n_warp_param = 0 - else: - self.warp = warp - self.n_warp_param = warp.get_n_params() + where ell_i are lengthscale parameters and sf2 is the signal variance + """ + + def __init__(self, x=None, covfuncnames=None): + if x is None: + raise ValueError("N x D data matrix must be supplied as input") + if covfuncnames is None: + raise ValueError("A list of covariance functions is required") + self.covfuncs = [] + self.n_params = 0 + for cname in covfuncnames: + covfunc = eval(cname + '(x)') + self.n_params += covfunc.get_n_params() + self.covfuncs.append(covfunc) + + if len(x.shape) == 1: + self.N = len(x) + self.D = 1 + else: + self.N, self.D = x.shape + +
[docs] def cov(self, theta, x, z=None): + theta_offset = 0 + for ci, covfunc in enumerate(self.covfuncs): + try: + n_params_c = covfunc.get_n_params() + theta_c = [theta[c] for c in + range(theta_offset, theta_offset + n_params_c)] + theta_offset += n_params_c + except Exception as e: + print(e) + + if ci == 0: + K = covfunc.cov(theta_c, x, z) + else: + K += covfunc.cov(theta_c, x, z) + return K
+ +
[docs] def dcov(self, theta, x, i): + theta_offset = 0 + for covfunc in self.covfuncs: + n_params_c = covfunc.get_n_params() + theta_c = [theta[c] for c in + range(theta_offset, theta_offset + n_params_c)] + theta_offset += n_params_c + + if theta_c: # does the variable have any hyperparameters? + if 'dK' not in locals(): + dK = covfunc.dcov(theta_c, x, i) + else: + dK += covfunc.dcov(theta_c, x, i) + return dK
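Composing CovSum as the class docstring describes (a linear kernel plus an ARD squared-exponential). Note that the covariance functions are passed by name as strings, because the constructor instantiates them with eval. The import path reflects the reorganised package layout and is an assumption that may differ between versions::

    import numpy as np
    from pcntoolkit.model.gp import CovSum   # import path is an assumption

    x = np.random.randn(20, 3)               # N x D data matrix
    C = CovSum(x, ('CovLin', 'CovSqExpARD'))
    theta = np.zeros(C.get_n_params())       # 0 params for CovLin, D + 1 for CovSqExpARD
    K = C.cov(theta, x)
    print(K.shape)                           # (20, 20)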
+ +# ----------------------- +# Gaussian process models +# ----------------------- + + +
[docs]class GPR: + """Gaussian process regression + + Estimation and prediction of Gaussian process regression models + + Basic usage:: + + G = GPR() + hyp = B.estimate(hyp0, cov, X, y) + ys, ys2 = B.predict(hyp, cov, X, y, Xs) + + where the variables are + + :param hyp: vector of hyperparmaters + :param cov: covariance function + :param X: N x D data array + :param y: 1D Array of targets (length N) + :param Xs: Nte x D array of test cases + :param hyp0: starting estimates for hyperparameter optimisation + + :returns: * ys - predictive mean + * ys2 - predictive variance + + The hyperparameters are:: + + hyp = ( log(sn), (cov function params) ) # hyp is a list or array + + The implementation and notation follows Rasmussen and Williams (2006). + As in the gpml toolbox, these parameters are estimated using conjugate + gradient optimisation of the marginal likelihood. Note that there is no + explicit mean function, thus the gpr routines are limited to modelling + zero-mean processes. + + Reference: + C. Rasmussen and C. Williams (2006) Gaussian Processes for Machine Learning + + Written by A. Marquand + """ + + def __init__(self, hyp=None, covfunc=None, X=None, y=None, n_iter=100, + tol=1e-3, verbose=False, warp=None): + + self.hyp = np.nan + self.nlZ = np.nan + self.tol = tol # not used at present + self.n_iter = n_iter + self.verbose = verbose + + # set up warped likelihood + if warp is None: + self.warp = None + self.n_warp_param = 0 + else: + self.warp = warp + self.n_warp_param = warp.get_n_params() - self.gamma = None + self.gamma = None - def _updatepost(self, hyp, covfunc): + def _updatepost(self, hyp, covfunc): - hypeq = np.asarray(hyp == self.hyp) - if hypeq.all() and hasattr(self, 'alpha') and \ - (hasattr(self, 'covfunc') and covfunc == self.covfunc): - return False - else: - return True + hypeq = np.asarray(hyp == self.hyp) + if hypeq.all() and hasattr(self, 'alpha') and \ + (hasattr(self, 'covfunc') and covfunc == self.covfunc): + return False + else: + return True -
[docs] def post(self, hyp, covfunc, X, y): - """ Generic function to compute posterior distribution. - """ +
[docs] def post(self, hyp, covfunc, X, y): + """ Generic function to compute posterior distribution. + """ - if len(hyp.shape) > 1: # force 1d hyperparameter array - hyp = hyp.flatten() - - if len(X.shape) == 1: - X = X[:, np.newaxis] - self.N, self.D = X.shape - - # hyperparameters - sn2 = np.exp(2*hyp[0]) # noise variance - if self.warp is not None: # parameters for warping the likelhood - n_lik_param = self.n_warp_param+1 - else: - n_lik_param = 1 - theta = hyp[n_lik_param:] # (generic) covariance hyperparameters - - if self.verbose: - print("estimating posterior ... | hyp=", hyp) - - self.K = covfunc.cov(theta, X) - self.L = chol(self.K + sn2*np.eye(self.N)) - self.alpha = solve(self.L.T, solve(self.L, y)) - self.hyp = hyp - self.covfunc = covfunc
- -
[docs] def loglik(self, hyp, covfunc, X, y): - """ Function to compute compute log (marginal) likelihood - """ - - # load or recompute posterior - if self.verbose: - print("computing likelihood ... | hyp=", hyp) + if len(hyp.shape) > 1: # force 1d hyperparameter array + hyp = hyp.flatten() + + if len(X.shape) == 1: + X = X[:, np.newaxis] + self.N, self.D = X.shape + + # hyperparameters + sn2 = np.exp(2*hyp[0]) # noise variance + if self.warp is not None: # parameters for warping the likelhood + n_lik_param = self.n_warp_param+1 + else: + n_lik_param = 1 + theta = hyp[n_lik_param:] # (generic) covariance hyperparameters + + if self.verbose: + print("estimating posterior ... | hyp=", hyp) + + self.K = covfunc.cov(theta, X) + self.L = chol(self.K + sn2*np.eye(self.N)) + self.alpha = solve(self.L.T, solve(self.L, y)) + self.hyp = hyp + self.covfunc = covfunc
+ +
[docs] def loglik(self, hyp, covfunc, X, y): + """ Function to compute compute log (marginal) likelihood + """ + + # load or recompute posterior + if self.verbose: + print("computing likelihood ... | hyp=", hyp) - # parameters for warping the likelhood function - if self.warp is not None: - gamma = hyp[1:(self.n_warp_param+1)] - y = self.warp.f(y, gamma) - y_unwarped = y + # parameters for warping the likelhood function + if self.warp is not None: + gamma = hyp[1:(self.n_warp_param+1)] + y = self.warp.f(y, gamma) + y_unwarped = y - if len(hyp.shape) > 1: # force 1d hyperparameter array - hyp = hyp.flatten() - if self._updatepost(hyp, covfunc): - try: - self.post(hyp, covfunc, X, y) - except (ValueError, LinAlgError): - print("Warning: Estimation of posterior distribution failed") - self.nlZ = 1/np.finfo(float).eps - return self.nlZ + if len(hyp.shape) > 1: # force 1d hyperparameter array + hyp = hyp.flatten() + if self._updatepost(hyp, covfunc): + try: + self.post(hyp, covfunc, X, y) + except (ValueError, LinAlgError): + print("Warning: Estimation of posterior distribution failed") + self.nlZ = 1/np.finfo(float).eps + return self.nlZ - self.nlZ = 0.5*y.T.dot(self.alpha) + sum(np.log(np.diag(self.L))) + \ - 0.5*self.N*np.log(2*np.pi) + self.nlZ = 0.5*y.T.dot(self.alpha) + sum(np.log(np.diag(self.L))) + \ + 0.5*self.N*np.log(2*np.pi) - if self.warp is not None: - # add in the Jacobian - self.nlZ = self.nlZ - sum(np.log(self.warp.df(y_unwarped, gamma))) + if self.warp is not None: + # add in the Jacobian + self.nlZ = self.nlZ - sum(np.log(self.warp.df(y_unwarped, gamma))) - # make sure the output is finite to stop the minimizer getting upset - if not np.isfinite(self.nlZ): - self.nlZ = 1/np.finfo(float).eps + # make sure the output is finite to stop the minimizer getting upset + if not np.isfinite(self.nlZ): + self.nlZ = 1/np.finfo(float).eps - if self.verbose: - print("nlZ= ", self.nlZ, " | hyp=", hyp) + if self.verbose: + print("nlZ= ", self.nlZ, " | hyp=", hyp) - return self.nlZ
+ return self.nlZ
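For reference, the quantity assembled above is the standard negative log marginal likelihood of a zero-mean GP (Rasmussen and Williams, 2006), written here in LaTeX using the code's quantities L = chol(K + sn2*I) and alpha = solve(L.T, solve(L, y))::

    -\log p(\mathbf{y} \mid X, \theta)
        = \tfrac{1}{2}\,\mathbf{y}^{\top}\boldsymbol{\alpha}
        + \sum_{i} \log L_{ii}
        + \tfrac{N}{2}\log 2\pi

The sum over the log-diagonal of the Cholesky factor equals half the log-determinant of K + sn2*I, and when a warped likelihood is used the log-Jacobian term, sum_n log f'(y_n), is subtracted exactly as in the code.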
-
[docs] def dloglik(self, hyp, covfunc, X, y): - """ Function to compute derivatives - """ +
[docs] def dloglik(self, hyp, covfunc, X, y): + """ Function to compute derivatives + """ - if len(hyp.shape) > 1: # force 1d hyperparameter array - hyp = hyp.flatten() + if len(hyp.shape) > 1: # force 1d hyperparameter array + hyp = hyp.flatten() - if self.warp is not None: - raise ValueError('optimization with derivatives is not yet ' + \ - 'supported for warped liklihood') + if self.warp is not None: + raise ValueError('optimization with derivatives is not yet ' + \ + 'supported for warped liklihood') - # hyperparameters - sn2 = np.exp(2*hyp[0]) # noise variance - theta = hyp[1:] # (generic) covariance hyperparameters - - # load posterior and prior covariance - if self._updatepost(hyp, covfunc): - try: - self.post(hyp, covfunc, X, y) - except (ValueError, LinAlgError): - print("Warning: Estimation of posterior distribution failed") - dnlZ = np.sign(self.dnlZ) / np.finfo(float).eps - return dnlZ - - # compute Q = alpha*alpha' - inv(K) - Q = np.outer(self.alpha, self.alpha) - \ - solve(self.L.T, solve(self.L, np.eye(self.N))) - - # initialise derivatives - self.dnlZ = np.zeros(len(hyp)) - - # noise variance - self.dnlZ[0] = -sn2*np.trace(Q) - - # covariance parameter(s) - for par in range(0, len(theta)): - # compute -0.5*trace(Q.dot(dK/d[theta_i])) efficiently - dK = covfunc.dcov(theta, X, i=par) - self.dnlZ[par+1] = -0.5*np.sum(np.sum(Q*dK.T)) - - # make sure the gradient is finite to stop the minimizer getting upset - if not all(np.isfinite(self.dnlZ)): - bad = np.where(np.logical_not(np.isfinite(self.dnlZ))) - for b in bad: - self.dnlZ[b] = np.sign(self.dnlZ[b]) / np.finfo(float).eps - - if self.verbose: - print("dnlZ= ", self.dnlZ, " | hyp=", hyp) - - return self.dnlZ
- - # model estimation (optimization) -
[docs] def estimate(self, hyp0, covfunc, X, y, optimizer='cg'): - """ Function to estimate the model - """ - if len(X.shape) == 1: - X = X[:, np.newaxis] - - self.hyp0 = hyp0 + # hyperparameters + sn2 = np.exp(2*hyp[0]) # noise variance + theta = hyp[1:] # (generic) covariance hyperparameters + + # load posterior and prior covariance + if self._updatepost(hyp, covfunc): + try: + self.post(hyp, covfunc, X, y) + except (ValueError, LinAlgError): + print("Warning: Estimation of posterior distribution failed") + dnlZ = np.sign(self.dnlZ) / np.finfo(float).eps + return dnlZ + + # compute Q = alpha*alpha' - inv(K) + Q = np.outer(self.alpha, self.alpha) - \ + solve(self.L.T, solve(self.L, np.eye(self.N))) + + # initialise derivatives + self.dnlZ = np.zeros(len(hyp)) + + # noise variance + self.dnlZ[0] = -sn2*np.trace(Q) + + # covariance parameter(s) + for par in range(0, len(theta)): + # compute -0.5*trace(Q.dot(dK/d[theta_i])) efficiently + dK = covfunc.dcov(theta, X, i=par) + self.dnlZ[par+1] = -0.5*np.sum(np.sum(Q*dK.T)) + + # make sure the gradient is finite to stop the minimizer getting upset + if not all(np.isfinite(self.dnlZ)): + bad = np.where(np.logical_not(np.isfinite(self.dnlZ))) + for b in bad: + self.dnlZ[b] = np.sign(self.dnlZ[b]) / np.finfo(float).eps + + if self.verbose: + print("dnlZ= ", self.dnlZ, " | hyp=", hyp) + + return self.dnlZ
+ + # model estimation (optimization) +
[docs] def estimate(self, hyp0, covfunc, X, y, optimizer='cg'): + """ Function to estimate the model + """ + if len(X.shape) == 1: + X = X[:, np.newaxis] + + self.hyp0 = hyp0 - if optimizer.lower() == 'cg': # conjugate gradients - out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, - (covfunc, X, y), disp=True, gtol=self.tol, - maxiter=self.n_iter, full_output=1) - - elif optimizer.lower() == 'powell': # Powell's method - out = optimize.fmin_powell(self.loglik, hyp0, (covfunc, X, y), - full_output=1) - else: - raise ValueError("unknown optimizer") - - # Always return a 1d array. The optimizer sometimes changes dimesnions - if len(out[0].shape) > 1: - self.hyp = out[0].flatten() - else: - self.hyp = out[0] - self.nlZ = out[1] - self.optimizer = optimizer - - return self.hyp
- -
[docs] def predict(self, hyp, X, y, Xs): - """ Function to make predictions from the model - """ - if len(hyp.shape) > 1: # force 1d hyperparameter array - hyp = hyp.flatten() + if optimizer.lower() == 'cg': # conjugate gradients + out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, + (covfunc, X, y), disp=True, gtol=self.tol, + maxiter=self.n_iter, full_output=1) + + elif optimizer.lower() == 'powell': # Powell's method + out = optimize.fmin_powell(self.loglik, hyp0, (covfunc, X, y), + full_output=1) + else: + raise ValueError("unknown optimizer") + + # Always return a 1d array. The optimizer sometimes changes dimesnions + if len(out[0].shape) > 1: + self.hyp = out[0].flatten() + else: + self.hyp = out[0] + self.nlZ = out[1] + self.optimizer = optimizer + + return self.hyp
+ +
[docs] def predict(self, hyp, X, y, Xs): + """ Function to make predictions from the model + """ + if len(hyp.shape) > 1: # force 1d hyperparameter array + hyp = hyp.flatten() - # ensure X and Xs are multi-dimensional arrays - if len(Xs.shape) == 1: - Xs = Xs[:, np.newaxis] - if len(X.shape) == 1: - X = X[:, np.newaxis] + # ensure X and Xs are multi-dimensional arrays + if len(Xs.shape) == 1: + Xs = Xs[:, np.newaxis] + if len(X.shape) == 1: + X = X[:, np.newaxis] - # parameters for warping the likelhood function - if self.warp is not None: - gamma = hyp[1:(self.n_warp_param+1)] - y = self.warp.f(y, gamma) + # parameters for warping the likelhood function + if self.warp is not None: + gamma = hyp[1:(self.n_warp_param+1)] + y = self.warp.f(y, gamma) - # reestimate posterior (avoids numerical problems with optimizer) - self.post(hyp, self.covfunc, X, y) + # reestimate posterior (avoids numerical problems with optimizer) + self.post(hyp, self.covfunc, X, y) - # hyperparameters - sn2 = np.exp(2*hyp[0]) # noise variance - theta = hyp[(self.n_warp_param + 1):] # (generic) covariance hyperparameters + # hyperparameters + sn2 = np.exp(2*hyp[0]) # noise variance + theta = hyp[(self.n_warp_param + 1):] # (generic) covariance hyperparameters - Ks = self.covfunc.cov(theta, Xs, X) - kss = self.covfunc.cov(theta, Xs) + Ks = self.covfunc.cov(theta, Xs, X) + kss = self.covfunc.cov(theta, Xs) - # predictive mean - ymu = Ks.dot(self.alpha) + # predictive mean + ymu = Ks.dot(self.alpha) - # predictive variance (for a noisy test input) - v = solve(self.L, Ks.T) - ys2 = kss - v.T.dot(v) + sn2 + # predictive variance (for a noisy test input) + v = solve(self.L, Ks.T) + ys2 = kss - v.T.dot(v) + sn2 - return ymu, ys2
+ return ymu, ys2
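An end-to-end sketch of the workflow described in the GPR docstring (the docstring's B.estimate / B.predict calls refer to the G = GPR() instance): estimate the hyperparameters by conjugate gradients, then predict at test inputs. The data are synthetic and the import path is an assumption::

    import numpy as np
    from pcntoolkit.model.gp import GPR, CovSqExp   # import path is an assumption

    rng = np.random.default_rng(0)
    X  = np.linspace(-3, 3, 50)[:, np.newaxis]
    y  = np.sin(X).ravel() + 0.1 * rng.standard_normal(50)
    Xs = np.linspace(-3, 3, 200)[:, np.newaxis]

    covfunc = CovSqExp(X)
    hyp0 = np.zeros(1 + covfunc.get_n_params())     # (log sn, log ell, log sf)
    G = GPR()
    hyp = G.estimate(hyp0, covfunc, X, y)           # minimise the negative log marginal likelihood
    ymu, ys2 = G.predict(hyp, X, y, Xs)             # predictive mean and (co)variance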
-
+
+
+
+ +
+ +
+

- -
-
- - - + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_modules/index.html b/doc/build/html/_modules/index.html index f0aaa7f5..89de4fe2 100644 --- a/doc/build/html/_modules/index.html +++ b/doc/build/html/_modules/index.html @@ -1,46 +1,186 @@ - - - - - - - Overview: module code — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + Overview: module code — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -
-
-
-
+ + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+
+
+

All modules for which code is available

-
+
+
+
+ +
+ +
+

- -
-
- - - + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_modules/normative.html b/doc/build/html/_modules/normative.html deleted file mode 100644 index 1d53a8ed..00000000 --- a/doc/build/html/_modules/normative.html +++ /dev/null @@ -1,1021 +0,0 @@ - - - - - - - - - normative — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - - - - - - - - - - - - - - - -
-
-
-
- -

Source code for normative

-#!/Users/andre/sfw/anaconda3/bin/python
-
-# ------------------------------------------------------------------------------
-#  Usage:
-#  python normative.py -m [maskfile] -k [number of CV folds] -c <covariates>
-#                      -t [test covariates] -r [test responses] <infile>
-#
-#  Either the -k switch or -t switch should be specified, but not both.
-#  If -t is selected, a set of responses should be provided with the -r switch
-#
-#  Written by A. Marquand
-# ------------------------------------------------------------------------------
-
-from __future__ import print_function
-from __future__ import division
-
-import os
-import sys
-import numpy as np
-import argparse
-import pickle
-import glob
-
-from sklearn.model_selection import KFold
-try:  # run as a package if installed
-    from pcntoolkit import fileio
-    from pcntoolkit import configs
-    from pcntoolkit.normative_model.norm_utils import norm_init
-    from pcntoolkit.utils import compute_pearsonr, CustomCV, explained_var, compute_MSLL
-except ImportError:
-    pass
-
-    path = os.path.abspath(os.path.dirname(__file__))
-    if path not in sys.path:
-        sys.path.append(path)
-        #sys.path.append(os.path.join(path,'normative_model'))
-    del path
-
-    import fileio
-    import configs
-    from utils import compute_pearsonr, CustomCV, explained_var, compute_MSLL
-    from normative_model.norm_utils import norm_init
-
-PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL
-
-
[docs]def load_response_vars(datafile, maskfile=None, vol=True): - """ load response variables (of any data type)""" - - if fileio.file_type(datafile) == 'nifti': - dat = fileio.load_nifti(datafile, vol=vol) - volmask = fileio.create_mask(dat, mask=maskfile) - Y = fileio.vol2vec(dat, volmask).T - else: - Y = fileio.load(datafile) - volmask = None - if fileio.file_type(datafile) == 'cifti': - Y = Y.T - - return Y, volmask
- - -
[docs]def get_args(*args): - """ Parse command line arguments""" - - # parse arguments - parser = argparse.ArgumentParser(description="Normative Modeling") - parser.add_argument("responses") - parser.add_argument("-f", help="Function to call", dest="func", - default="estimate") - parser.add_argument("-m", help="mask file", dest="maskfile", default=None) - parser.add_argument("-c", help="covariates file", dest="covfile", - default=None) - parser.add_argument("-k", help="cross-validation folds", dest="cvfolds", - default=None) - parser.add_argument("-t", help="covariates (test data)", dest="testcov", - default=None) - parser.add_argument("-r", help="responses (test data)", dest="testresp", - default=None) - parser.add_argument("-a", help="algorithm", dest="alg", default="gpr") - parser.add_argument("-x", help="algorithm specific config options", - dest="configparam", default=None) - parser.add_argument('-s', action='store_false', - help="Flag to skip standardization.", dest="standardize") - parser.add_argument("keyword_args", nargs=argparse.REMAINDER) - - args = parser.parse_args() - - # Process required arguemnts - wdir = os.path.realpath(os.path.curdir) - respfile = os.path.join(wdir, args.responses) - if args.covfile is None: - raise(ValueError, "No covariates specified") - else: - covfile = args.covfile - - # Process optional arguments - if args.maskfile is None: - maskfile = None - else: - maskfile = os.path.join(wdir, args.maskfile) - if args.testcov is None and args.cvfolds is not None: - testcov = None - testresp = None - cvfolds = int(args.cvfolds) - print("Running under " + str(cvfolds) + " fold cross-validation.") - else: - print("Test covariates specified") - testcov = args.testcov - cvfolds = None - if args.testresp is None: - testresp = None - print("No test response variables specified") - else: - testresp = args.testresp - if args.cvfolds is not None: - print("Ignoring cross-valdation specification (test data given)") - - # Process addtional keyword arguments. These are always added as strings - kw_args = {} - for kw in args.keyword_args: - kw_arg = kw.split('=') - - exec("kw_args.update({'" + kw_arg[0] + "' : " + - "'" + str(kw_arg[1]) + "'" + "})") - - return respfile, maskfile, covfile, cvfolds, \ - testcov, testresp, args.func, args.alg, \ - args.configparam, args.standardize, kw_args
- - -
[docs]def evaluate(Y, Yhat, S2=None, mY=None, sY=None, - metrics = ['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL']): - ''' Compute error metrics - This function will compute error metrics based on a set of predictions Yhat - and a set of true response variables Y, namely: - - * Rho: Pearson correlation - * RMSE: root mean squared error - * SMSE: standardized mean squared error - * EXPV: explained variance - - If the predictive variance is also specified the log loss will be computed - (which also takes into account the predictive variance). If the mean and - standard deviation are also specified these will be used to standardize - this, yielding the mean standardized log loss - - :param Y: N x P array of true response variables - :param Yhat: N x P array of predicted response variables - :param S2: predictive variance - :param mY: mean of the training set - :param sY: standard deviation of the training set - - :returns metrics: evaluation metrics - - ''' - - feature_num = Y.shape[1] - - # Remove metrics that cannot be computed with only a single data point - if Y.shape[0] == 1: - if 'MSLL' in metrics: - metrics.remove('MSLL') - if 'SMSE' in metrics: - metrics.remove('SMSE') - - # find and remove bad variables from the response variables - nz = np.where(np.bitwise_and(np.isfinite(Y).any(axis=0), - np.var(Y, axis=0) != 0))[0] - - MSE = np.mean((Y - Yhat)**2, axis=0) - - results = dict() - - if 'RMSE' in metrics: - RMSE = np.sqrt(MSE) - results['RMSE'] = RMSE - - if 'Rho' in metrics: - Rho = np.zeros(feature_num) - pRho = np.ones(feature_num) - Rho[nz], pRho[nz] = compute_pearsonr(Y[:,nz], Yhat[:,nz]) - results['Rho'] = Rho - results['pRho'] = pRho - - if 'SMSE' in metrics: - SMSE = np.zeros_like(MSE) - SMSE[nz] = MSE[nz] / np.var(Y[:,nz], axis=0) - results['SMSE'] = SMSE - - if 'EXPV' in metrics: - EXPV = np.zeros(feature_num) - EXPV[nz] = explained_var(Y[:,nz], Yhat[:,nz]) - results['EXPV'] = EXPV - - if 'MSLL' in metrics: - if ((S2 is not None) and (mY is not None) and (sY is not None)): - MSLL = np.zeros(feature_num) - MSLL[nz] = compute_MSLL(Y[:,nz], Yhat[:,nz], S2[:,nz], - mY.reshape(-1,1).T, - (sY**2).reshape(-1,1).T) - results['MSLL'] = MSLL - - return results
- -
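A minimal sketch of calling the evaluate() routine documented above on synthetic arrays; the import path is an assumption, and MSLL is omitted because it additionally needs the predictive variance and the training mean and standard deviation::

    import numpy as np
    from pcntoolkit.normative import evaluate   # import path is an assumption

    Y    = np.random.randn(100, 4)              # true responses, N x P
    Yhat = Y + 0.5 * np.random.randn(100, 4)    # predictions
    res  = evaluate(Y, Yhat, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV'])
    print(res['Rho'], res['SMSE'])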
[docs]def save_results(respfile, Yhat, S2, maskvol, Z=None, outputsuffix=None, - results=None, save_path=''): - - print("Writing outputs ...") - if respfile is None: - exfile = None - file_ext = '.pkl' - else: - if fileio.file_type(respfile) == 'cifti' or \ - fileio.file_type(respfile) == 'nifti': - exfile = respfile - else: - exfile = None - file_ext = fileio.file_extension(respfile) - - if outputsuffix is not None: - ext = str(outputsuffix) + file_ext - else: - ext = file_ext - - fileio.save(Yhat, os.path.join(save_path, 'yhat' + ext), example=exfile, - mask=maskvol) - fileio.save(S2, os.path.join(save_path, 'ys2' + ext), example=exfile, - mask=maskvol) - if Z is not None: - fileio.save(Z, os.path.join(save_path, 'Z' + ext), example=exfile, - mask=maskvol) - - if results is not None: - for metric in list(results.keys()): - fileio.save(results[metric], os.path.join(save_path, metric + ext), - example=exfile, mask=maskvol)
- -
[docs]def estimate(covfile, respfile, **kwargs): - """ Estimate a normative model - - This will estimate a model in one of two settings according to theparticular parameters specified (see below) - - * under k-fold cross-validation. - requires respfile, covfile and cvfolds>=2 - * estimating a training dataset then applying to a second test dataset. - requires respfile, covfile, testcov and testresp. - * estimating on a training dataset ouput of forward maps mean and se. - requires respfile, covfile and testcov - - The models are estimated on the basis of data stored on disk in ascii or - neuroimaging data formats (nifti or cifti). Ascii data should be in - tab or space delimited format with the number of subjects in rows and the - number of variables in columns. Neuroimaging data will be reshaped - into the appropriate format - - Basic usage:: - - estimate(covfile, respfile, [extra_arguments]) - - where the variables are defined below. Note that either the cfolds - parameter or (testcov, testresp) should be specified, but not both. - - :param respfile: response variables for the normative model - :param covfile: covariates used to predict the response variable - :param maskfile: mask used to apply to the data (nifti only) - :param cvfolds: Number of cross-validation folds - :param testcov: Test covariates - :param testresp: Test responses - :param alg: Algorithm for normative model - :param configparam: Parameters controlling the estimation algorithm - :param saveoutput: Save the output to disk? Otherwise returned as arrays - :param outputsuffix: Text string to add to the output filenames - - All outputs are written to disk in the same format as the input. These are: - - :outputs: * yhat - predictive mean - * ys2 - predictive variance - * nm - normative model - * Z - deviance scores - * Rho - Pearson correlation between true and predicted responses - * pRho - parametric p-value for this correlation - * rmse - root mean squared error between true/predicted responses - * smse - standardised mean squared error - - The outputsuffix may be useful to estimate multiple normative models in the - same directory (e.g. 
for custom cross-validation schemes) - """ - - # parse keyword arguments - maskfile = kwargs.pop('maskfile',None) - cvfolds = kwargs.pop('cvfolds', None) - testcov = kwargs.pop('testcov', None) - testresp = kwargs.pop('testresp',None) - alg = kwargs.pop('alg','gpr') - outputsuffix = kwargs.pop('outputsuffix','_estimate') - standardize = kwargs.pop('standardize','True') - warp = kwargs.get('warp', None) - - # convert from strings if necessary - if type(standardize) is str: - standardize = standardize=='True' - saveoutput = kwargs.pop('saveoutput','True') - if type(saveoutput) is str: - saveoutput = saveoutput=='True' - savemodel = kwargs.pop('savemodel','False') - if type(savemodel) is str: - savemodel = savemodel=='True' - - if savemodel and not os.path.isdir('Models'): - os.mkdir('Models') - - # load data - print("Processing data in " + respfile) - X = fileio.load(covfile) - Y, maskvol = load_response_vars(respfile, maskfile) - if len(Y.shape) == 1: - Y = Y[:, np.newaxis] - if len(X.shape) == 1: - X = X[:, np.newaxis] - Nmod = Y.shape[1] - - if (testcov is not None) and (cvfolds is None): # we have a separate test dataset - - run_cv = False - cvfolds = 1 - Xte = fileio.load(testcov) - if len(Xte.shape) == 1: - Xte = Xte[:, np.newaxis] - if testresp is not None: - Yte, testmask = load_response_vars(testresp, maskfile) - if len(Yte.shape) == 1: - Yte = Yte[:, np.newaxis] - else: - sub_te = Xte.shape[0] - Yte = np.zeros([sub_te, Nmod]) - - # treat as a single train-test split - testids = range(X.shape[0], X.shape[0]+Xte.shape[0]) - splits = CustomCV((range(0, X.shape[0]),), (testids,)) - - Y = np.concatenate((Y, Yte), axis=0) - X = np.concatenate((X, Xte), axis=0) - - else: - run_cv = True - # we are running under cross-validation - splits = KFold(n_splits=cvfolds) - testids = range(0, X.shape[0]) - - # find and remove bad variables from the response variables - # note: the covariates are assumed to have already been checked - nz = np.where(np.bitwise_and(np.isfinite(Y).any(axis=0), - np.var(Y, axis=0) != 0))[0] - - # run cross-validation loop - Yhat = np.zeros_like(Y) - S2 = np.zeros_like(Y) - Z = np.zeros_like(Y) - nlZ = np.zeros((Nmod, cvfolds)) - - mean_resp = [] - std_resp = [] - mean_cov = [] - std_cov = [] - - if warp is not None: - Ywarp = np.zeros_like(Yhat) - mean_resp_warp = [np.zeros(Y.shape[1]) for s in range(splits.n_splits)] - std_resp_warp = [np.zeros(Y.shape[1]) for s in range(splits.n_splits)] - - for idx in enumerate(splits.split(X)): - - fold = idx[0] - tr = idx[1][0] - te = idx[1][1] - - # standardize responses and covariates, ignoring invalid entries - iy, jy = np.ix_(tr, nz) - mY = np.mean(Y[iy, jy], axis=0) - sY = np.std(Y[iy, jy], axis=0) - mean_resp.append(mY) - std_resp.append(sY) - if standardize: - Yz = np.zeros_like(Y) - Yz[:, nz] = (Y[:, nz] - mY) / sY - mX = np.mean(X[tr, :], axis=0) - sX = np.std(X[tr, :], axis=0) - Xz = (X - mX) / sX - mean_cov.append(mX) - std_cov.append(sX) - else: - Yz = Y - Xz = X - - # estimate the models for all subjects - for i in range(0, len(nz)): - print("Estimating model ", i+1, "of", len(nz)) - nm = norm_init(Xz[tr, :], Yz[tr, nz[i]], alg=alg, **kwargs) - try: - nm = nm.estimate(Xz[tr, :], Yz[tr, nz[i]], **kwargs) - - yhat, s2 = nm.predict(Xz[te, :], Xz[tr, :], Yz[tr, nz[i]], **kwargs) - - if savemodel: - nm.save('Models/NM_' + str(fold) + '_' + str(nz[i]) + outputsuffix + '.pkl' ) - - if standardize: - Yhat[te, nz[i]] = yhat * sY[i] + mY[i] - S2[te, nz[i]] = s2 * sY[i]**2 - else: - Yhat[te, nz[i]] = yhat - S2[te, nz[i]] = s2 - - 
nlZ[nz[i], fold] = nm.neg_log_lik - - if (run_cv or testresp is not None): - # warp the labels? - if warp is not None: - warp_param = nm.blr.hyp[1:nm.blr.warp.get_n_params()+1] - Ywarp[te, nz[i]] = nm.blr.warp.f(Y[te, nz[i]], warp_param) - Ytest = Ywarp[te, nz[i]] - - # Save warped mean of the training data (for MSLL) - yw = nm.blr.warp.f(Y[tr, nz[i]], warp_param) - mean_resp_warp[fold][i] = np.mean(yw) - std_resp_warp[fold][i] = np.std(yw) - else: - Ytest = Y[te, nz[i]] - - Z[te, nz[i]] = (Ytest - Yhat[te, nz[i]]) / \ - np.sqrt(S2[te, nz[i]]) - - except Exception as e: - exc_type, exc_obj, exc_tb = sys.exc_info() - fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] - print("Model ", i+1, "of", len(nz), - "FAILED!..skipping and writing NaN to outputs") - print("Exception:") - print(e) - print(exc_type, fname, exc_tb.tb_lineno) - - Yhat[te, nz[i]] = float('nan') - S2[te, nz[i]] = float('nan') - nlZ[nz[i], fold] = float('nan') - if testcov is None: - Z[te, nz[i]] = float('nan') - else: - if testresp is not None: - Z[te, nz[i]] = float('nan') - - - if savemodel: - print('Saving model meta-data...') - with open('Models/meta_data.md', 'wb') as file: - pickle.dump({'valid_voxels':nz, 'fold_num':cvfolds, - 'mean_resp':mean_resp, 'std_resp':std_resp, - 'mean_cov':mean_cov, 'std_cov':std_cov, - 'regressor':alg, 'standardize':standardize}, file, protocol=PICKLE_PROTOCOL) - - # compute performance metrics - if (run_cv or testresp is not None): - print("Evaluating the model ...") - if warp is None: - results = evaluate(Y[testids, :], Yhat[testids, :], - S2=S2[testids, :], mY=mean_resp[0], - sY=std_resp[0]) - else: - results = evaluate(Ywarp[testids, :], Yhat[testids, :], - S2=S2[testids, :], mY=mean_resp_warp[0], - sY=std_resp_warp[0]) - - - # Set writing options - if saveoutput: - if (run_cv or testresp is not None): - save_results(respfile, Yhat[testids, :], S2[testids, :], maskvol, - Z=Z[testids, :], results=results, outputsuffix=outputsuffix) - - else: - save_results(respfile, Yhat[testids, :], S2[testids, :], maskvol, - outputsuffix=outputsuffix) - - else: - if (run_cv or testresp is not None): - output = (Yhat[testids, :], S2[testids, :], nm, Z[testids, :], results) - else: - output = (Yhat[testids, :], S2[testids, :], nm) - - return output
- - -
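The two calling modes described in the estimate() docstring, sketched with hypothetical file names (the import path, algorithm and suffixes are placeholders)::

    from pcntoolkit.normative import estimate   # import path is an assumption

    # 10-fold cross-validation on a single dataset
    estimate('cov_tr.txt', 'resp_tr.txt', cvfolds=10, alg='gpr')

    # or: train on one dataset and apply the model to a held-out test set
    estimate('cov_tr.txt', 'resp_tr.txt',
             testcov='cov_te.txt', testresp='resp_te.txt',
             alg='gpr', savemodel=True, outputsuffix='_demo')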
[docs]def fit(covfile, respfile, **kwargs): - - # parse keyword arguments - maskfile = kwargs.pop('maskfile',None) - alg = kwargs.pop('alg','gpr') - savemodel = kwargs.pop('savemodel','True')=='True' - standardize = kwargs.pop('standardize',True) - outputsuffix = kwargs.pop('outputsuffix','_fit') - - if savemodel and not os.path.isdir('Models'): - os.mkdir('Models') - - # load data - print("Processing data in " + respfile) - X = fileio.load(covfile) - Y, maskvol = load_response_vars(respfile, maskfile) - if len(Y.shape) == 1: - Y = Y[:, np.newaxis] - if len(X.shape) == 1: - X = X[:, np.newaxis] - - # find and remove bad variables from the response variables - # note: the covariates are assumed to have already been checked - nz = np.where(np.bitwise_and(np.isfinite(Y).any(axis=0), - np.var(Y, axis=0) != 0))[0] - - mean_resp = [] - std_resp = [] - mean_cov = [] - std_cov = [] - - # standardize responses and covariates, ignoring invalid entries - mY = np.mean(Y[:, nz], axis=0) - sY = np.std(Y[:, nz], axis=0) - mean_resp.append(mY) - std_resp.append(sY) - if standardize: - Yz = np.zeros_like(Y) - Yz[:, nz] = (Y[:, nz] - mY) / sY - mX = np.mean(X, axis=0) - sX = np.std(X, axis=0) - Xz = (X - mX) / sX - mean_resp.append(mY) - std_resp.append(sY) - mean_cov.append(mX) - std_cov.append(sX) - else: - Yz = Y - Xz = X - - # estimate the models for all subjects - for i in range(0, len(nz)): - print("Estimating model ", i+1, "of", len(nz)) - nm = norm_init(Xz, Yz[:, nz[i]], alg=alg, **kwargs) - nm = nm.estimate(Xz, Yz[:, nz[i]], **kwargs) - - if savemodel: - nm.save('Models/NM_' + str(0) + '_' + str(nz[i]) + outputsuffix + '.pkl' ) - - if savemodel: - print('Saving model meta-data...') - with open('Models/meta_data.md', 'wb') as file: - pickle.dump({'valid_voxels':nz, - 'mean_resp':mean_resp, 'std_resp':std_resp, - 'mean_cov':mean_cov, 'std_cov':std_cov, - 'regressor':alg, 'standardize':standardize}, file, protocol=PICKLE_PROTOCOL) - - return nm
- - -
[docs]def predict(covfile, respfile=None, maskfile=None, **kwargs): - ''' - Make predictions on the basis of a pre-estimated normative model - If only the covariates are specified then only predicted mean and variance - will be returned. If the test responses are also specified then quantities - That depend on those will also be returned (Z scores and error metrics) - - Basic usage:: - - predict(covfile, [extra_arguments]) - - where the variables are defined below. - - :param covfile: test covariates used to predict the response variable - :param respfile: test response variables for the normative model - :param maskfile: mask used to apply to the data (nifti only) - :param model_path: Directory containing the normative model and metadata. - When using parallel prediction, do not pass the model path. It will be automatically - decided. - :param output_path: Directory to store the results - :param outputsuffix: Text string to add to the output filenames - :param batch_size: batch size (for use with normative_parallel) - :param job_id: batch id - - All outputs are written to disk in the same format as the input. These are: - - :outputs: * Yhat - predictive mean - * S2 - predictive variance - * Z - Z scores - ''' - - - model_path = kwargs.pop('model_path', 'Models') - job_id = kwargs.pop('job_id', None) - batch_size = kwargs.pop('batch_size', None) - output_path = kwargs.pop('output_path', '') - outputsuffix = kwargs.pop('outputsuffix', '_predict') - inputsuffix = kwargs.pop('inputsuffix', '_estimate') - alg = kwargs.pop('alg') - - if respfile is not None and not os.path.exists(respfile): - print("Response file does not exist. Only returning predictions") - respfile = None - if not os.path.isdir(model_path): - print('Models directory does not exist!') - return - else: - if os.path.exists(os.path.join(model_path, 'meta_data.md')): - with open(os.path.join(model_path, 'meta_data.md'), 'rb') as file: - meta_data = pickle.load(file) - standardize = meta_data['standardize'] - mY = meta_data['mean_resp'] - sY = meta_data['std_resp'] - mX = meta_data['mean_cov'] - sX = meta_data['std_cov'] - else: - standardize = False - - if batch_size is not None: - batch_size = int(batch_size) - job_id = int(job_id) - 1 - - if (output_path != '') and (not os.path.isdir(output_path)): - os.mkdir(output_path) - - # load data - print("Loading data ...") - X = fileio.load(covfile) - if len(X.shape) == 1: - X = X[:, np.newaxis] - - sample_num = X.shape[0] - feature_num = len(glob.glob(os.path.join(model_path, 'NM_*' + inputsuffix + '.pkl'))) - - Yhat = np.zeros([sample_num, feature_num]) - S2 = np.zeros([sample_num, feature_num]) - Z = np.zeros([sample_num, feature_num]) - - - if standardize: - Xz = (X - mX[0]) / sX[0] - else: - Xz = X - - # estimate the models for all subjects - for i in range(feature_num): - print("Prediction by model ", i+1, "of", feature_num) - nm = norm_init(Xz) - nm = nm.load(os.path.join(model_path, 'NM_' + str(0) + '_' + - str(i) + inputsuffix + '.pkl')) - if (alg!='hbr' or nm.configs['transferred']==False): - yhat, s2 = nm.predict(Xz, **kwargs) - else: - tsbefile = kwargs.pop('tsbefile') - batch_effects_test = fileio.load(tsbefile) - yhat, s2 = nm.predict_on_new_sites(Xz, batch_effects_test) - - if standardize: - Yhat[:, i] = yhat.squeeze() * sY[0][i] + mY[0][i] - S2[:, i] = s2.squeeze() * sY[0][i]**2 - else: - Yhat[:, i] = yhat.squeeze() - S2[:, i] = s2.squeeze() - - if respfile is None: - save_results(None, Yhat, S2, None, outputsuffix=outputsuffix) - - return (Yhat, S2) - - else: - Y, 
maskvol = load_response_vars(respfile, maskfile) - if len(Y.shape) == 1: - Y = Y[:, np.newaxis] - - # warp the targets? - if 'blr' in dir(nm): - if nm.blr.warp is not None: - warp_param = nm.blr.hyp[1:nm.blr.warp.get_n_params()+1] - Y = nm.blr.warp.f(Y, warp_param) - - Z = (Y - Yhat) / np.sqrt(S2) - - print("Evaluating the model ...") - results = evaluate(Y, Yhat, S2=S2, - metrics = ['Rho', 'RMSE', 'SMSE', 'EXPV']) - - print("Evaluations Writing outputs ...") - save_results(respfile, Yhat, S2, maskvol, Z=Z, outputsuffix=outputsuffix, - results=results, save_path=output_path) - - return (Yhat, S2, Z)
- - -
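The corresponding predict() call applies previously saved models to new covariates; note that alg is popped without a default, so it must be supplied. Paths, suffixes and file names are placeholders and the import path is an assumption::

    from pcntoolkit.normative import predict    # import path is an assumption

    # predictions only (no test responses available)
    Yhat, S2 = predict('cov_te.txt', alg='gpr',
                       model_path='Models', inputsuffix='_estimate')

    # with test responses: additionally returns deviation (Z) scores and writes metrics
    Yhat, S2, Z = predict('cov_te.txt', respfile='resp_te.txt', alg='gpr',
                          model_path='Models', inputsuffix='_estimate',
                          outputsuffix='_predict')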
[docs]def transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None, - **kwargs): - ''' - Transfer learning on the basis of a pre-estimated normative model by using - the posterior distribution over the parameters as an informed prior for - new data. currently only supported for HBR. - - Basic usage:: - - transfer(covfile, respfile [extra_arguments]) - - where the variables are defined below. - - :param covfile: test covariates used to predict the response variable - :param respfile: test response variables for the normative model - :param maskfile: mask used to apply to the data (nifti only) - :param testcov: Test covariates - :param testresp: Test responses - :param model_path: Directory containing the normative model and metadata - :param trbefile: Training batch effects file - :param batch_size: batch size (for use with normative_parallel) - :param job_id: batch id - - All outputs are written to disk in the same format as the input. These are: - - :outputs: * Yhat - predictive mean - * S2 - predictive variance - * Z - Z scores - ''' - - alg = kwargs.pop('alg') - if alg != 'hbr': - print('Model transferring is only possible for HBR models.') - return - elif (not 'model_path' in list(kwargs.keys())) or \ - (not 'output_path' in list(kwargs.keys())) or \ - (not 'trbefile' in list(kwargs.keys())): - print('InputError: Some mandatory arguments are missing.') - return - else: - model_path = kwargs.pop('model_path') - output_path = kwargs.pop('output_path') - trbefile = kwargs.pop('trbefile') - batch_effects_train = fileio.load(trbefile) - - outputsuffix = kwargs.pop('outputsuffix', '_transfer') - inputsuffix = kwargs.pop('inputsuffix', '_estimate') - tsbefile = kwargs.pop('tsbefile', None) - - job_id = kwargs.pop('job_id', None) - batch_size = kwargs.pop('batch_size', None) - if batch_size is not None: - batch_size = int(batch_size) - job_id = int(job_id) - 1 - - if not os.path.isdir(output_path): - os.mkdir(output_path) - - # load data - print("Loading data ...") - X = fileio.load(covfile) - Y, maskvol = load_response_vars(respfile, maskfile) - if len(Y.shape) == 1: - Y = Y[:, np.newaxis] - if len(X.shape) == 1: - X = X[:, np.newaxis] - feature_num = Y.shape[1] - mY = np.mean(Y, axis=0) - sY = np.std(Y, axis=0) - - if testcov is not None: - # we have a separate test dataset - Xte = fileio.load(testcov) - if len(Xte.shape) == 1: - Xte = Xte[:, np.newaxis] - ts_sample_num = Xte.shape[0] - if testresp is not None: - Yte, testmask = load_response_vars(testresp, maskfile) - if len(Yte.shape) == 1: - Yte = Yte[:, np.newaxis] - else: - Yte = np.zeros([ts_sample_num, feature_num]) - - if tsbefile is not None: - batch_effects_test = fileio.load(tsbefile) - else: - batch_effects_test = np.zeros([Xte.shape[0],2]) - - Yhat = np.zeros([ts_sample_num, feature_num]) - S2 = np.zeros([ts_sample_num, feature_num]) - Z = np.zeros([ts_sample_num, feature_num]) - - # estimate the models for all subjects - for i in range(feature_num): - - nm = norm_init(X) - if batch_size is not None: # when using normative_parallel - print("Transferring model ", job_id*batch_size+i) - nm = nm.load(os.path.join(model_path, 'NM_0_' + - str(job_id*batch_size+i) + inputsuffix + '.pkl')) - else: - print("Transferring model ", i+1, "of", feature_num) - nm = nm.load(os.path.join(model_path, 'NM_0_' + str(i) + inputsuffix + '.pkl')) - - nm = nm.estimate_on_new_sites(X, Y[:,i], batch_effects_train) - if batch_size is not None: - nm.save(os.path.join(output_path, 'NM_0_' + - str(job_id*batch_size+i) + outputsuffix + '.pkl')) 
- nm.save(os.path.join('Models', 'NM_0_' + - str(i) + outputsuffix + '.pkl')) - else: - nm.save(os.path.join(output_path, 'NM_0_' + - str(i) + outputsuffix + '.pkl')) - - if testcov is not None: - yhat, s2 = nm.predict_on_new_sites(Xte, batch_effects_test) - Yhat[:, i] = yhat.squeeze() - S2[:, i] = s2.squeeze() - - if testresp is None: - save_results(respfile, Yhat, S2, maskvol, outputsuffix=outputsuffix) - return (Yhat, S2) - else: - Z = (Yte - Yhat) / np.sqrt(S2) - - print("Evaluating the model ...") - results = evaluate(Yte, Yhat, S2=S2, mY=mY, sY=sY) - - save_results(respfile, Yhat, S2, maskvol, Z=Z, results=results, - outputsuffix=outputsuffix) - - return (Yhat, S2, Z)
- - -
[docs]def extend(covfile, respfile, maskfile=None, **kwargs): - - alg = kwargs.pop('alg') - if alg != 'hbr': - print('Model extention is only possible for HBR models.') - return - elif (not 'model_path' in list(kwargs.keys())) or \ - (not 'output_path' in list(kwargs.keys())) or \ - (not 'trbefile' in list(kwargs.keys())) or \ - (not 'dummycovfile' in list(kwargs.keys()))or \ - (not 'dummybefile' in list(kwargs.keys())): - print('InputError: Some mandatory arguments are missing.') - return - else: - model_path = kwargs.pop('model_path') - output_path = kwargs.pop('output_path') - trbefile = kwargs.pop('trbefile') - dummycovfile = kwargs.pop('dummycovfile') - dummybefile = kwargs.pop('dummybefile') - - outputsuffix = kwargs.pop('outputsuffix', '_extend') - inputsuffix = kwargs.pop('inputsuffix', '_estimate') - informative_prior = kwargs.pop('job_id', 'False') == 'True' - generation_factor = int(kwargs.pop('generation_factor', '10')) - job_id = kwargs.pop('job_id', None) - batch_size = kwargs.pop('batch_size', None) - if batch_size is not None: - batch_size = int(batch_size) - job_id = int(job_id) - 1 - - if not os.path.isdir(output_path): - os.mkdir(output_path) - - # load data - print("Loading data ...") - X = fileio.load(covfile) - Y, maskvol = load_response_vars(respfile, maskfile) - batch_effects_train = fileio.load(trbefile) - X_dummy = fileio.load(dummycovfile) - batch_effects_dummy = fileio.load(dummybefile) - - if len(Y.shape) == 1: - Y = Y[:, np.newaxis] - if len(X.shape) == 1: - X = X[:, np.newaxis] - if len(X_dummy.shape) == 1: - X_dummy = X_dummy[:, np.newaxis] - feature_num = Y.shape[1] - - # estimate the models for all subjects - for i in range(feature_num): - - nm = norm_init(X) - if batch_size is not None: # when using nirmative_parallel - print("Extending model ", job_id*batch_size+i) - nm = nm.load(os.path.join(model_path, 'NM_0_' + - str(job_id*batch_size+i) + inputsuffix + '.pkl')) - else: - print("Extending model ", i+1, "of", feature_num) - nm = nm.load(os.path.join(model_path, 'NM_0_' + str(i) + inputsuffix +'.pkl')) - - nm = nm.extend(X, Y[:,i:i+1], batch_effects_train, X_dummy, batch_effects_dummy, - samples=generation_factor, informative_prior=informative_prior) - - if batch_size is not None: - nm.save(os.path.join(output_path, 'NM_0_' + - str(job_id*batch_size+i) + outputsuffix + '.pkl')) - nm.save(os.path.join('Models', 'NM_0_' + - str(i) + outputsuffix + '.pkl')) - else: - nm.save(os.path.join(output_path, 'NM_0_' + - str(i) + outputsuffix + '.pkl'))
- - -
[docs]def main(*args): - """ Parse arguments and estimate model - """ - - np.seterr(invalid='ignore') - - rfile, mfile, cfile, cv, tcfile, trfile, func, alg, cfg, std, kw = get_args(args) - - # collect required arguments - pos_args = ['cfile', 'rfile'] - - # collect basic keyword arguments controlling model estimation - kw_args = ['maskfile=mfile', - 'cvfolds=cv', - 'testcov=tcfile', - 'testresp=trfile', - 'alg=alg', - 'configparam=cfg', - 'standardize=std'] - - # add additional keyword arguments - for k in kw: - kw_args.append(k + '=' + "'" + kw[k] + "'") - all_args = ', '.join(pos_args + kw_args) - - # Executing the target function - exec(func + '(' + all_args + ')')
- -# For running from the command line: -if __name__ == "__main__": - main(sys.argv[1:]) -
- -
-
-
-
- -
-
-
-
-
-
\ No newline at end of file
diff --git a/doc/build/html/_modules/normative_parallel.html b/doc/build/html/_modules/normative_parallel.html
index 0e78f723..fe089d7a 100644
--- a/doc/build/html/_modules/normative_parallel.html
+++ b/doc/build/html/_modules/normative_parallel.html
@@ -1,1169 +1,1385 @@
-    normative_parallel — Predictive Clinical Neuroscience Toolkit 0.17 documentation
+    normative_parallel — Predictive Clinical Neuroscience Toolkit 0.20 documentation

Source code for normative_parallel

-#!/.../anaconda/bin/python/
-
-# -----------------------------------------------------------------------------
-# Run parallel normative modelling.
-# All processing takes place in the processing directory (processing_dir)
-# All inputs should be text files or binaries and space seperated
-#
-# It is possible to run these functions using...
-#
-# * k-fold cross-validation
-# * estimating a training dataset then applying to a second test dataset
-#
-# First,the data is split for parallel processing.
-# Second, the splits are submitted to the cluster.
-# Third, the output is collected and combined.
-#
-# witten by (primarily) T Wolfers, (adaptated) SM Kia, H Huijsdens, L Parks, 
-# AF Marquand
-# -----------------------------------------------------------------------------
-
-from __future__ import print_function
-from __future__ import division
-
-import os
-import sys
-import glob
-import shutil
-import pickle
-import fileinput
-import numpy as np
-import pandas as pd
-from subprocess import call
-
-try:
-    import pcntoolkit as ptk
-    import pcntoolkit.fileio as fileio
-    from pcntoolkit import configs
-except ImportError:
-    pass
-    path = os.path.abspath(os.path.dirname(__file__))
-    if path not in sys.path:
-        sys.path.append(path)
-        del path
-    import fileio
-    import configs
+#!/.../anaconda/bin/python/
+
+# -----------------------------------------------------------------------------
+# Run parallel normative modelling.
+# All processing takes place in the processing directory (processing_dir)
+# All inputs should be text files or binaries and space separated
+#
+# It is possible to run these functions using...
+#
+# * k-fold cross-validation
+# * estimating a training dataset then applying to a second test dataset
+#
+# First, the data is split for parallel processing.
+# Second, the splits are submitted to the cluster.
+# Third, the output is collected and combined.
+#
+# written by (primarily) T Wolfers, (adapted) SM Kia, H Huijsdens, L Parks,
+# AF Marquand
+# -----------------------------------------------------------------------------
+
+from __future__ import print_function
+from __future__ import division
+
+import os
+import sys
+import glob
+import shutil
+import pickle
+import fileinput
+import numpy as np
+import pandas as pd
+from subprocess import call
+
+try:
+    import pcntoolkit as ptk
+    import pcntoolkit.dataio.fileio as fileio
+    from pcntoolkit import configs
+    ptkpath = ptk.__path__[0] 
+except ImportError:
+    pass
+    ptkpath = os.path.abspath(os.path.dirname(__file__))
+    if ptkpath not in sys.path:
+        sys.path.append(ptkpath)
+    import dataio.fileio as fileio
+    import configs
     
-PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL
-
-
-
[docs]def execute_nm(processing_dir, - python_path, - job_name, - covfile_path, - respfile_path, - batch_size, - memory, - duration, - normative_path=None, - func='estimate', - **kwargs): - - """ - This function is a mother function that executes all parallel normative - modelling routines. Different specifications are possible using the sub- - functions. - - :Parameters: - * processing_dir -> Full path to the processing dir - * python_path -> Full path to the python distribution - * normative_path -> Full path to the normative.py. If None (default) - then it will automatically retrieves the path from - the installed packeage. - * job_name -> Name for the bash script that is the output of - this function - * covfile_path -> Full path to a .txt file that contains all - covariats (subjects x covariates) for the - responsefile - * respfile_path -> Full path to a .txt that contains all features - (subjects x features) - * batch_size -> Number of features in each batch - * memory -> Memory requirements written as string - for example 4gb or 500mb - * duation -> The approximate duration of the job, a string - with HH:MM:SS for example 01:01:01 - * cv_folds -> Number of cross validations - * testcovfile_path -> Full path to a .txt file that contains all - covariats (subjects x covariates) for the - testresponse file - * testrespfile_path -> Full path to a .txt file that contains all - test features - * log_path -> Pathfor saving log files - * binary -> If True uses binary format for response file - otherwise it is text - - written by (primarily) T Wolfers, (adapted) SM Kia - """ - if normative_path is None: - normative_path = ptk.__path__[0] + '/normative.py' +PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL + + +
[docs]def execute_nm(processing_dir, + python_path, + job_name, + covfile_path, + respfile_path, + batch_size, + memory, + duration, + normative_path=None, + func='estimate', + **kwargs): + + """ + This function is a mother function that executes all parallel normative + modelling routines. Different specifications are possible using the sub- + functions. + + :Parameters: + * processing_dir -> Full path to the processing dir + * python_path -> Full path to the python distribution + * normative_path -> Full path to the normative.py. If None (default) + then it will automatically retrieves the path from + the installed packeage. + * job_name -> Name for the bash script that is the output of + this function + * covfile_path -> Full path to a .txt file that contains all + covariats (subjects x covariates) for the + responsefile + * respfile_path -> Full path to a .txt that contains all features + (subjects x features) + * batch_size -> Number of features in each batch + * memory -> Memory requirements written as string + for example 4gb or 500mb + * duation -> The approximate duration of the job, a string + with HH:MM:SS for example 01:01:01 + * cv_folds -> Number of cross validations + * testcovfile_path -> Full path to a .txt file that contains all + covariats (subjects x covariates) for the + testresponse file + * testrespfile_path -> Full path to a .txt file that contains all + test features + * log_path -> Pathfor saving log files + * binary -> If True uses binary format for response file + otherwise it is text + + written by (primarily) T Wolfers, (adapted) SM Kia + """ + + if normative_path is None: + normative_path = ptkpath + '/normative.py' - cv_folds = kwargs.get('cv_folds', None) - testcovfile_path = kwargs.get('testcovfile_path', None) - testrespfile_path= kwargs.get('testrespfile_path', None) - cluster_spec = kwargs.pop('cluster_spec', 'torque') - log_path = kwargs.pop('log_path', None) - binary = kwargs.pop('binary', False) + cv_folds = kwargs.get('cv_folds', None) + testcovfile_path = kwargs.get('testcovfile_path', None) + testrespfile_path= kwargs.get('testrespfile_path', None) + cluster_spec = kwargs.pop('cluster_spec', 'torque') + log_path = kwargs.pop('log_path', None) + binary = kwargs.pop('binary', False) - split_nm(processing_dir, - respfile_path, - batch_size, - binary, - **kwargs) - - batch_dir = glob.glob(processing_dir + 'batch_*') - # print(batch_dir) - number_of_batches = len(batch_dir) - # print(number_of_batches) - - if binary: - file_extentions = '.pkl' - else: - file_extentions = '.txt' + split_nm(processing_dir, + respfile_path, + batch_size, + binary, + **kwargs) + + batch_dir = glob.glob(processing_dir + 'batch_*') + # print(batch_dir) + number_of_batches = len(batch_dir) + # print(number_of_batches) + + if binary: + file_extentions = '.pkl' + else: + file_extentions = '.txt' - kwargs.update({'batch_size':str(batch_size)}) - for n in range(1, number_of_batches+1): - print(n) - kwargs.update({'job_id':str(n)}) - if testrespfile_path is not None: - if cv_folds is not None: - raise(ValueError, """If the response file is specified - cv_folds must be equal to None""") - else: - # specified train/test split - batch_processing_dir = processing_dir + 'batch_' + str(n) + '/' - batch_job_name = job_name + '_' + str(n) + '.sh' - batch_respfile_path = (batch_processing_dir + 'resp_batch_' + - str(n) + file_extentions) - batch_testrespfile_path = (batch_processing_dir + - 'testresp_batch_' + - str(n) + file_extentions) - batch_job_path = batch_processing_dir + 
batch_job_name - if cluster_spec == 'torque': - # update the response file - kwargs.update({'testrespfile_path': \ - batch_testrespfile_path}) - bashwrap_nm(batch_processing_dir, - python_path, - normative_path, - batch_job_name, - covfile_path, - batch_respfile_path, - func=func, - **kwargs) - qsub_nm(job_path=batch_job_path, - log_path=log_path, - memory=memory, - duration=duration) - elif cluster_spec == 'sbatch': - # update the response file - kwargs.update({'testrespfile_path': \ - batch_testrespfile_path}) - sbatchwrap_nm(batch_processing_dir, - python_path, - normative_path, - batch_job_name, - covfile_path, - batch_respfile_path, - func=func, - memory=memory, - duration=duration, - **kwargs) - sbatch_nm(job_path=batch_job_path, - log_path=log_path) - elif cluster_spec == 'new': - # this part requires addition in different envioronment [ - sbatchwrap_nm(processing_dir=batch_processing_dir, func=func, - **kwargs) - sbatch_nm(processing_dir=batch_processing_dir) - # ] - if testrespfile_path is None: - if testcovfile_path is not None: - # forward model - batch_processing_dir = processing_dir + 'batch_' + str(n) + '/' - batch_job_name = job_name + '_' + str(n) + '.sh' - batch_respfile_path = (batch_processing_dir + 'resp_batch_' + - str(n) + file_extentions) - batch_job_path = batch_processing_dir + batch_job_name - if cluster_spec == 'torque': - bashwrap_nm(batch_processing_dir, - python_path, - normative_path, - batch_job_name, - covfile_path, - batch_respfile_path, - func=func, - **kwargs) - qsub_nm(job_path=batch_job_path, - log_path=log_path, - memory=memory, - duration=duration) - elif cluster_spec == 'sbatch': - sbatchwrap_nm(batch_processing_dir, - python_path, - normative_path, - batch_job_name, - covfile_path, - batch_respfile_path, - func=func, - memory=memory, - duration=duration, - **kwargs) - sbatch_nm(job_path=batch_job_path, - log_path=log_path) - elif cluster_spec == 'new': - # this part requires addition in different envioronment [ - bashwrap_nm(processing_dir=batch_processing_dir, func=func, - **kwargs) - qsub_nm(processing_dir=batch_processing_dir) - # ] - else: - # cross-validation - batch_processing_dir = (processing_dir + 'batch_' + - str(n) + '/') - batch_job_name = job_name + '_' + str(n) + '.sh' - batch_respfile_path = (batch_processing_dir + - 'resp_batch_' + str(n) + - file_extentions) - batch_job_path = batch_processing_dir + batch_job_name - if cluster_spec == 'torque': - bashwrap_nm(batch_processing_dir, - python_path, - normative_path, - batch_job_name, - covfile_path, - batch_respfile_path, - func=func, - **kwargs) - qsub_nm(job_path=batch_job_path, - log_path=log_path, - memory=memory, - duration=duration) - elif cluster_spec == 'sbatch': - sbatchwrap_nm(batch_processing_dir, - python_path, - normative_path, - batch_job_name, - covfile_path, - batch_respfile_path, - func=func, - memory=memory, - duration=duration, - **kwargs) - sbatch_nm(job_path=batch_job_path, - log_path=log_path) - elif cluster_spec == 'new': - # this part requires addition in different envioronment [ - bashwrap_nm(processing_dir=batch_processing_dir, func=func, - **kwargs) - qsub_nm(processing_dir=batch_processing_dir)
- # ] - - -"""routines that are environment independent""" - - -
[docs]def split_nm(processing_dir, - respfile_path, - batch_size, - binary, - **kwargs): - - """ This function prepares the input files for normative_parallel. - - :Parameters: - * processing_dir -> Full path to the folder of processing - * respfile_path -> Full path to the responsefile.txt - (subjects x features) - * batch_size -> Number of features in each batch - * testrespfile_path -> Full path to the test responsefile.txt - (subjects x features) - * binary -> If True binary file - - :outputs: - * The creation of a folder struture for batch-wise processing - - witten by (primarily) T Wolfers (adapted) SM Kia - """ + kwargs.update({'batch_size':str(batch_size)}) + for n in range(1, number_of_batches+1): + print(n) + kwargs.update({'job_id':str(n)}) + if testrespfile_path is not None: + if cv_folds is not None: + raise(ValueError, """If the response file is specified + cv_folds must be equal to None""") + else: + # specified train/test split + batch_processing_dir = processing_dir + 'batch_' + str(n) + '/' + batch_job_name = job_name + '_' + str(n) + '.sh' + batch_respfile_path = (batch_processing_dir + 'resp_batch_' + + str(n) + file_extentions) + batch_testrespfile_path = (batch_processing_dir + + 'testresp_batch_' + + str(n) + file_extentions) + batch_job_path = batch_processing_dir + batch_job_name + if cluster_spec == 'torque': + # update the response file + kwargs.update({'testrespfile_path': \ + batch_testrespfile_path}) + bashwrap_nm(batch_processing_dir, + python_path, + normative_path, + batch_job_name, + covfile_path, + batch_respfile_path, + func=func, + **kwargs) + qsub_nm(job_path=batch_job_path, + log_path=log_path, + memory=memory, + duration=duration) + elif cluster_spec == 'sbatch': + # update the response file + kwargs.update({'testrespfile_path': \ + batch_testrespfile_path}) + sbatchwrap_nm(batch_processing_dir, + python_path, + normative_path, + batch_job_name, + covfile_path, + batch_respfile_path, + func=func, + memory=memory, + duration=duration, + **kwargs) + sbatch_nm(job_path=batch_job_path, + log_path=log_path) + elif cluster_spec == 'new': + # this part requires addition in different envioronment [ + sbatchwrap_nm(processing_dir=batch_processing_dir, + func=func, **kwargs) + sbatch_nm(processing_dir=batch_processing_dir) + # ] + if testrespfile_path is None: + if testcovfile_path is not None: + # forward model + batch_processing_dir = processing_dir + 'batch_' + str(n) + '/' + batch_job_name = job_name + '_' + str(n) + '.sh' + batch_respfile_path = (batch_processing_dir + 'resp_batch_' + + str(n) + file_extentions) + batch_job_path = batch_processing_dir + batch_job_name + if cluster_spec == 'torque': + bashwrap_nm(batch_processing_dir, + python_path, + normative_path, + batch_job_name, + covfile_path, + batch_respfile_path, + func=func, + **kwargs) + qsub_nm(job_path=batch_job_path, + log_path=log_path, + memory=memory, + duration=duration) + elif cluster_spec == 'sbatch': + sbatchwrap_nm(batch_processing_dir, + python_path, + normative_path, + batch_job_name, + covfile_path, + batch_respfile_path, + func=func, + memory=memory, + duration=duration, + **kwargs) + sbatch_nm(job_path=batch_job_path, + log_path=log_path) + elif cluster_spec == 'new': + # this part requires addition in different envioronment [ + bashwrap_nm(processing_dir=batch_processing_dir, func=func, + **kwargs) + qsub_nm(processing_dir=batch_processing_dir) + # ] + else: + # cross-validation + batch_processing_dir = (processing_dir + 'batch_' + + str(n) + '/') + batch_job_name = job_name + 
'_' + str(n) + '.sh' + batch_respfile_path = (batch_processing_dir + + 'resp_batch_' + str(n) + + file_extentions) + batch_job_path = batch_processing_dir + batch_job_name + if cluster_spec == 'torque': + bashwrap_nm(batch_processing_dir, + python_path, + normative_path, + batch_job_name, + covfile_path, + batch_respfile_path, + func=func, + **kwargs) + qsub_nm(job_path=batch_job_path, + log_path=log_path, + memory=memory, + duration=duration) + elif cluster_spec == 'sbatch': + sbatchwrap_nm(batch_processing_dir, + python_path, + normative_path, + batch_job_name, + covfile_path, + batch_respfile_path, + func=func, + memory=memory, + duration=duration, + **kwargs) + sbatch_nm(job_path=batch_job_path, + log_path=log_path) + elif cluster_spec == 'new': + # this part requires addition in different envioronment [ + bashwrap_nm(processing_dir=batch_processing_dir, func=func, + **kwargs) + qsub_nm(processing_dir=batch_processing_dir)
+ # ] + + +"""routines that are environment independent""" + + +
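A minimal usage sketch (not part of the module source), assuming the package is importable as `pcntoolkit.normative_parallel`: the call below shows how `execute_nm` might be invoked for a fixed train/test split on a torque cluster. Every path, the job name and the resource strings are illustrative placeholders.

```
# Illustrative sketch only; all paths, names and resource values are placeholders.
from pcntoolkit.normative_parallel import execute_nm

processing_dir = '/project/nm_run/'    # the functions expect a trailing slash

execute_nm(processing_dir,
           python_path='/opt/anaconda3/bin/python',
           job_name='nm_job',
           covfile_path=processing_dir + 'cov_train.txt',
           respfile_path=processing_dir + 'resp_train.txt',
           batch_size=10,                       # features per batch
           memory='4gb',
           duration='01:00:00',
           func='estimate',
           testcovfile_path=processing_dir + 'cov_test.txt',
           testrespfile_path=processing_dir + 'resp_test.txt',
           cluster_spec='torque',
           log_path=processing_dir + 'log/',
           binary=False)
```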
[docs]def split_nm(processing_dir, + respfile_path, + batch_size, + binary, + **kwargs): + + """ This function prepares the input files for normative_parallel. + + :Parameters: + * processing_dir -> Full path to the folder of processing + * respfile_path -> Full path to the responsefile.txt + (subjects x features) + * batch_size -> Number of features in each batch + * testrespfile_path -> Full path to the test responsefile.txt + (subjects x features) + * binary -> If True binary file + + :outputs: + * The creation of a folder struture for batch-wise processing + + witten by (primarily) T Wolfers (adapted) SM Kia + """ - testrespfile_path = kwargs.pop('testrespfile_path', None) - - dummy, respfile_extension = os.path.splitext(respfile_path) - if (binary and respfile_extension != '.pkl'): - raise(ValueError, """If binary is True the file format for the - testrespfile file must be .pkl""") - elif (binary==False and respfile_extension != '.txt'): - raise(ValueError, """If binary is False the file format for the - testrespfile file must be .txt""") - - # splits response into batches - if testrespfile_path is None: - if (binary==False): - respfile = fileio.load_ascii(respfile_path) - else: - respfile = pd.read_pickle(respfile_path) - - respfile = pd.DataFrame(respfile) - - numsub = respfile.shape[1] - batch_vec = np.arange(0, - numsub, - batch_size) - batch_vec = np.append(batch_vec, - numsub) + testrespfile_path = kwargs.pop('testrespfile_path', None) + + dummy, respfile_extension = os.path.splitext(respfile_path) + if (binary and respfile_extension != '.pkl'): + raise(ValueError, """If binary is True the file format for the + testrespfile file must be .pkl""") + elif (binary==False and respfile_extension != '.txt'): + raise(ValueError, """If binary is False the file format for the + testrespfile file must be .txt""") + + # splits response into batches + if testrespfile_path is None: + if (binary==False): + respfile = fileio.load_ascii(respfile_path) + else: + respfile = pd.read_pickle(respfile_path) + + respfile = pd.DataFrame(respfile) + + numsub = respfile.shape[1] + batch_vec = np.arange(0, + numsub, + batch_size) + batch_vec = np.append(batch_vec, + numsub) - for n in range(0, (len(batch_vec) - 1)): - resp_batch = respfile.iloc[:, (batch_vec[n]): batch_vec[n + 1]] - os.chdir(processing_dir) - resp = str('resp_batch_' + str(n+1)) - batch = str('batch_' + str(n+1)) - if not os.path.exists(processing_dir + batch): - os.makedirs(processing_dir + batch) - os.makedirs(processing_dir + batch + '/Models/') - if (binary==False): - fileio.save_pd(resp_batch, - processing_dir + batch + '/' + - resp + '.txt') - else: - resp_batch.to_pickle(processing_dir + batch + '/' + - resp + '.pkl', protocol=PICKLE_PROTOCOL) - - # splits response and test responsefile into batches - else: - dummy, testrespfile_extension = os.path.splitext(testrespfile_path) - if (binary and testrespfile_extension != '.pkl'): - raise(ValueError, """If binary is True the file format for the - testrespfile file must be .pkl""") - elif(binary==False and testrespfile_extension != '.txt'): - raise(ValueError, """If binary is False the file format for the - testrespfile file must be .txt""") - - if (binary==False): - respfile = fileio.load_ascii(respfile_path) - testrespfile = fileio.load_ascii(testrespfile_path) - else: - respfile = pd.read_pickle(respfile_path) - testrespfile = pd.read_pickle(testrespfile_path) - - respfile = pd.DataFrame(respfile) - testrespfile = pd.DataFrame(testrespfile) - - numsub = respfile.shape[1] - batch_vec = 
np.arange(0, numsub, - batch_size) - batch_vec = np.append(batch_vec, - numsub) - for n in range(0, (len(batch_vec) - 1)): - resp_batch = respfile.iloc[:, (batch_vec[n]): batch_vec[n + 1]] - testresp_batch = testrespfile.iloc[:, (batch_vec[n]): batch_vec[n + - 1]] - os.chdir(processing_dir) - resp = str('resp_batch_' + str(n+1)) - testresp = str('testresp_batch_' + str(n+1)) - batch = str('batch_' + str(n+1)) - if not os.path.exists(processing_dir + batch): - os.makedirs(processing_dir + batch) - os.makedirs(processing_dir + batch + '/Models/') - if (binary==False): - fileio.save_pd(resp_batch, - processing_dir + batch + '/' + - resp + '.txt') - fileio.save_pd(testresp_batch, - processing_dir + batch + '/' + testresp + - '.txt') - else: - resp_batch.to_pickle(processing_dir + batch + '/' + - resp + '.pkl', protocol=PICKLE_PROTOCOL) - testresp_batch.to_pickle(processing_dir + batch + '/' + - testresp + '.pkl', protocol=PICKLE_PROTOCOL)
- - -
[docs]def collect_nm(processing_dir, - job_name, - func='estimate', - collect=False, - binary=False, - batch_size=None, - outputsuffix='_estimate'): + for n in range(0, (len(batch_vec) - 1)): + resp_batch = respfile.iloc[:, (batch_vec[n]): batch_vec[n + 1]] + os.chdir(processing_dir) + resp = str('resp_batch_' + str(n+1)) + batch = str('batch_' + str(n+1)) + if not os.path.exists(processing_dir + batch): + os.makedirs(processing_dir + batch) + os.makedirs(processing_dir + batch + '/Models/') + if (binary==False): + fileio.save_pd(resp_batch, + processing_dir + batch + '/' + + resp + '.txt') + else: + resp_batch.to_pickle(processing_dir + batch + '/' + + resp + '.pkl', protocol=PICKLE_PROTOCOL) + + # splits response and test responsefile into batches + else: + dummy, testrespfile_extension = os.path.splitext(testrespfile_path) + if (binary and testrespfile_extension != '.pkl'): + raise(ValueError, """If binary is True the file format for the + testrespfile file must be .pkl""") + elif(binary==False and testrespfile_extension != '.txt'): + raise(ValueError, """If binary is False the file format for the + testrespfile file must be .txt""") + + if (binary==False): + respfile = fileio.load_ascii(respfile_path) + testrespfile = fileio.load_ascii(testrespfile_path) + else: + respfile = pd.read_pickle(respfile_path) + testrespfile = pd.read_pickle(testrespfile_path) + + respfile = pd.DataFrame(respfile) + testrespfile = pd.DataFrame(testrespfile) + + numsub = respfile.shape[1] + batch_vec = np.arange(0, numsub, + batch_size) + batch_vec = np.append(batch_vec, + numsub) + for n in range(0, (len(batch_vec) - 1)): + resp_batch = respfile.iloc[:, (batch_vec[n]): batch_vec[n + 1]] + testresp_batch = testrespfile.iloc[:, (batch_vec[n]): batch_vec[n + + 1]] + os.chdir(processing_dir) + resp = str('resp_batch_' + str(n+1)) + testresp = str('testresp_batch_' + str(n+1)) + batch = str('batch_' + str(n+1)) + if not os.path.exists(processing_dir + batch): + os.makedirs(processing_dir + batch) + os.makedirs(processing_dir + batch + '/Models/') + if (binary==False): + fileio.save_pd(resp_batch, + processing_dir + batch + '/' + + resp + '.txt') + fileio.save_pd(testresp_batch, + processing_dir + batch + '/' + testresp + + '.txt') + else: + resp_batch.to_pickle(processing_dir + batch + '/' + + resp + '.pkl', protocol=PICKLE_PROTOCOL) + testresp_batch.to_pickle(processing_dir + batch + '/' + + testresp + '.pkl', + protocol=PICKLE_PROTOCOL)
+ + +
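`split_nm` is normally called for you by `execute_nm`, but it can also be run on its own, for example to prepare the batch folders without submitting anything. A sketch with assumed file locations:

```
# Sketch: split a text response file (subjects x features) into batch_* folders.
from pcntoolkit.normative_parallel import split_nm

split_nm('/project/nm_run/',                  # processing_dir, trailing slash
         '/project/nm_run/resp_train.txt',    # training responses
         batch_size=10,
         binary=False,                        # .txt files; set True for .pkl
         testrespfile_path='/project/nm_run/resp_test.txt')
```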
[docs]def collect_nm(processing_dir, + job_name, + func='estimate', + collect=False, + binary=False, + batch_size=None, + outputsuffix='_estimate'): - """This function checks and collects all batches. + """This function checks and collects all batches. - :Parameters: - * processing_dir -> Full path to the processing directory - * collect -> If True data is checked for failed batches - and collected; if False data is just checked - * binary -> Results in pkl format? + :Parameters: + * processing_dir -> Full path to the processing directory + * collect -> If True data is checked for failed batches + and collected; if False data is just checked + * binary -> Results in pkl format? - :ouptuts: - * Text files containing all results accross all batches the combined - output (written to disk) - * returns 0 if batches fail, 1 otherwise + :ouptuts: + * Text files containing all results accross all batches the combined + output (written to disk) + * returns 0 if batches fail, 1 otherwise - written by (primarily) T Wolfers, (adapted) SM Kia - """ + written by (primarily) T Wolfers, (adapted) SM Kia + """ - if binary: - file_extentions = '.pkl' - else: - file_extentions = '.txt' + if binary: + file_extentions = '.pkl' + else: + file_extentions = '.txt' - # detect number of subjects, batches, hyperparameters and CV - batches = glob.glob(processing_dir + 'batch_*/') + # detect number of subjects, batches, hyperparameters and CV + batches = glob.glob(processing_dir + 'batch_*/') - count = 0 - batch_fail = [] + count = 0 + batch_fail = [] - if func != 'fit': - file_example = [] - for batch in batches: - if file_example == []: - file_example = glob.glob(batch + 'yhat' + outputsuffix + file_extentions) - else: - break - if binary is False: - file_example = fileio.load(file_example[0]) - else: - file_example = pd.read_pickle(file_example[0]) - numsubjects = file_example.shape[0] - batch_size = file_example.shape[1] + if func != 'fit': + file_example = [] + # TODO: Collect_nm only depends on yhat, thus does not work when no + # prediction is made (when test cov is not specified). 
+ for batch in batches: + if file_example == []: + file_example = glob.glob(batch + 'yhat' + outputsuffix + + file_extentions) + else: + break + if binary is False: + file_example = fileio.load(file_example[0]) + else: + file_example = pd.read_pickle(file_example[0]) + numsubjects = file_example.shape[0] + batch_size = file_example.shape[1] - # artificially creates files for batches that were not executed - batch_dirs = glob.glob(processing_dir + 'batch_*/') - batch_dirs = fileio.sort_nicely(batch_dirs) - for batch in batch_dirs: - filepath = glob.glob(batch + 'yhat' + outputsuffix + '*') - if filepath == []: - count = count+1 - batch1 = glob.glob(batch + '/' + job_name + '*.sh') - print(batch1) - batch_fail.append(batch1) - if collect is True: - pRho = np.ones(batch_size) - pRho = pRho.transpose() - pRho = pd.Series(pRho) - fileio.save(pRho, batch + 'pRho' + outputsuffix + file_extentions) + # artificially creates files for batches that were not executed + batch_dirs = glob.glob(processing_dir + 'batch_*/') + batch_dirs = fileio.sort_nicely(batch_dirs) + for batch in batch_dirs: + filepath = glob.glob(batch + 'yhat' + outputsuffix + '*') + if filepath == []: + count = count+1 + batch1 = glob.glob(batch + '/' + job_name + '*.sh') + print(batch1) + batch_fail.append(batch1) + if collect is True: + pRho = np.ones(batch_size) + pRho = pRho.transpose() + pRho = pd.Series(pRho) + fileio.save(pRho, batch + 'pRho' + outputsuffix + + file_extentions) - Rho = np.zeros(batch_size) - Rho = Rho.transpose() - Rho = pd.Series(Rho) - fileio.save(Rho, batch + 'Rho' + outputsuffix + file_extentions) + Rho = np.zeros(batch_size) + Rho = Rho.transpose() + Rho = pd.Series(Rho) + fileio.save(Rho, batch + 'Rho' + outputsuffix + + file_extentions) - rmse = np.zeros(batch_size) - rmse = rmse.transpose() - rmse = pd.Series(rmse) - fileio.save(rmse, batch + 'RMSE' + outputsuffix + file_extentions) + rmse = np.zeros(batch_size) + rmse = rmse.transpose() + rmse = pd.Series(rmse) + fileio.save(rmse, batch + 'RMSE' + outputsuffix + + file_extentions) - smse = np.zeros(batch_size) - smse = smse.transpose() - smse = pd.Series(smse) - fileio.save(smse, batch + 'SMSE' + outputsuffix + file_extentions) + smse = np.zeros(batch_size) + smse = smse.transpose() + smse = pd.Series(smse) + fileio.save(smse, batch + 'SMSE' + outputsuffix + + file_extentions) - expv = np.zeros(batch_size) - expv = expv.transpose() - expv = pd.Series(expv) - fileio.save(expv, batch + 'EXPV' + outputsuffix + file_extentions) + expv = np.zeros(batch_size) + expv = expv.transpose() + expv = pd.Series(expv) + fileio.save(expv, batch + 'EXPV' + outputsuffix + + file_extentions) - msll = np.zeros(batch_size) - msll = msll.transpose() - msll = pd.Series(msll) - fileio.save(msll, batch + 'MSLL' + outputsuffix + file_extentions) + msll = np.zeros(batch_size) + msll = msll.transpose() + msll = pd.Series(msll) + fileio.save(msll, batch + 'MSLL' + outputsuffix + + file_extentions) - yhat = np.zeros([numsubjects, batch_size]) - yhat = pd.DataFrame(yhat) - fileio.save(yhat, batch + 'yhat' + outputsuffix + file_extentions) + yhat = np.zeros([numsubjects, batch_size]) + yhat = pd.DataFrame(yhat) + fileio.save(yhat, batch + 'yhat' + outputsuffix + + file_extentions) - ys2 = np.zeros([numsubjects, batch_size]) - ys2 = pd.DataFrame(ys2) - fileio.save(ys2, batch + 'ys2' + outputsuffix + file_extentions) + ys2 = np.zeros([numsubjects, batch_size]) + ys2 = pd.DataFrame(ys2) + fileio.save(ys2, batch + 'ys2' + outputsuffix + + file_extentions) - Z = np.zeros([numsubjects, 
batch_size]) - Z = pd.DataFrame(Z) - fileio.save(Z, batch + 'Z' + outputsuffix + file_extentions) + Z = np.zeros([numsubjects, batch_size]) + Z = pd.DataFrame(Z) + fileio.save(Z, batch + 'Z' + outputsuffix + + file_extentions) + + nll = np.zeros(batch_size) + nll = nll.transpose() + nll = pd.Series(nll) + fileio.save(nll, batch + 'NLL' + outputsuffix + + file_extentions) + + bic = np.zeros(batch_size) + bic = bic.transpose() + bic = pd.Series(bic) + fileio.save(bic, batch + 'BIC' + outputsuffix + + file_extentions) - if not os.path.isdir(batch + 'Models'): - os.mkdir('Models') + if not os.path.isdir(batch + 'Models'): + os.mkdir('Models') - else: # if more than 10% of yhat is nan then consider the batch as a failed batch - yhat = fileio.load(filepath[0]) - if np.count_nonzero(~np.isnan(yhat))/(np.prod(yhat.shape))<0.9: - count = count+1 - batch1 = glob.glob(batch + '/' + job_name + '*.sh') - print('More than 10% nans in '+ batch1[0]) - batch_fail.append(batch1) + else: # if more than 10% of yhat is nan then it is a failed batch + yhat = fileio.load(filepath[0]) + if np.count_nonzero(~np.isnan(yhat))/(np.prod(yhat.shape))<0.9: + count = count+1 + batch1 = glob.glob(batch + '/' + job_name + '*.sh') + print('More than 10% nans in '+ batch1[0]) + batch_fail.append(batch1) - # combines all output files across batches - if collect is True: - pRho_filenames = glob.glob(processing_dir + 'batch_*/' + 'pRho' + - outputsuffix + '*') - if pRho_filenames: - pRho_filenames = fileio.sort_nicely(pRho_filenames) - pRho_dfs = [] - for pRho_filename in pRho_filenames: - pRho_dfs.append(pd.DataFrame(fileio.load(pRho_filename))) - pRho_dfs = pd.concat(pRho_dfs, ignore_index=True, axis=0) - fileio.save(pRho_dfs, processing_dir + 'pRho' + outputsuffix + - file_extentions) - del pRho_dfs - - Rho_filenames = glob.glob(processing_dir + 'batch_*/' + 'Rho' + - outputsuffix + '*') - if Rho_filenames: - Rho_filenames = fileio.sort_nicely(Rho_filenames) - Rho_dfs = [] - for Rho_filename in Rho_filenames: - Rho_dfs.append(pd.DataFrame(fileio.load(Rho_filename))) - Rho_dfs = pd.concat(Rho_dfs, ignore_index=True, axis=0) - fileio.save(Rho_dfs, processing_dir + 'Rho' + outputsuffix + - file_extentions) - del Rho_dfs - - Z_filenames = glob.glob(processing_dir + 'batch_*/' + 'Z' + - outputsuffix + '*') - if Z_filenames: - Z_filenames = fileio.sort_nicely(Z_filenames) - Z_dfs = [] - for Z_filename in Z_filenames: - Z_dfs.append(pd.DataFrame(fileio.load(Z_filename))) - Z_dfs = pd.concat(Z_dfs, ignore_index=True, axis=1) - fileio.save(Z_dfs, processing_dir + 'Z' + outputsuffix + - file_extentions) - del Z_dfs + else: + batch_dirs = glob.glob(processing_dir + 'batch_*/') + batch_dirs = fileio.sort_nicely(batch_dirs) + for batch in batch_dirs: + filepath = glob.glob(batch + 'Models/' + 'NM_' + '*' + outputsuffix + + '*') + if len(filepath) < batch_size: + count = count+1 + batch1 = glob.glob(batch + '/' + job_name + '*.sh') + print(batch1) + batch_fail.append(batch1) + + # combines all output files across batches + if collect is True: + pRho_filenames = glob.glob(processing_dir + 'batch_*/' + 'pRho' + + outputsuffix + '*') + if pRho_filenames: + pRho_filenames = fileio.sort_nicely(pRho_filenames) + pRho_dfs = [] + for pRho_filename in pRho_filenames: + pRho_dfs.append(pd.DataFrame(fileio.load(pRho_filename))) + pRho_dfs = pd.concat(pRho_dfs, ignore_index=True, axis=0) + fileio.save(pRho_dfs, processing_dir + 'pRho' + outputsuffix + + file_extentions) + del pRho_dfs + + Rho_filenames = glob.glob(processing_dir + 'batch_*/' + 'Rho' 
+ + outputsuffix + '*') + if Rho_filenames: + Rho_filenames = fileio.sort_nicely(Rho_filenames) + Rho_dfs = [] + for Rho_filename in Rho_filenames: + Rho_dfs.append(pd.DataFrame(fileio.load(Rho_filename))) + Rho_dfs = pd.concat(Rho_dfs, ignore_index=True, axis=0) + fileio.save(Rho_dfs, processing_dir + 'Rho' + outputsuffix + + file_extentions) + del Rho_dfs + + Z_filenames = glob.glob(processing_dir + 'batch_*/' + 'Z' + + outputsuffix + '*') + if Z_filenames: + Z_filenames = fileio.sort_nicely(Z_filenames) + Z_dfs = [] + for Z_filename in Z_filenames: + Z_dfs.append(pd.DataFrame(fileio.load(Z_filename))) + Z_dfs = pd.concat(Z_dfs, ignore_index=True, axis=1) + fileio.save(Z_dfs, processing_dir + 'Z' + outputsuffix + + file_extentions) + del Z_dfs + + yhat_filenames = glob.glob(processing_dir + 'batch_*/' + 'yhat' + + outputsuffix + '*') + if yhat_filenames: + yhat_filenames = fileio.sort_nicely(yhat_filenames) + yhat_dfs = [] + for yhat_filename in yhat_filenames: + yhat_dfs.append(pd.DataFrame(fileio.load(yhat_filename))) + yhat_dfs = pd.concat(yhat_dfs, ignore_index=True, axis=1) + fileio.save(yhat_dfs, processing_dir + 'yhat' + outputsuffix + + file_extentions) + del yhat_dfs + + ys2_filenames = glob.glob(processing_dir + 'batch_*/' + 'ys2' + + outputsuffix + '*') + if ys2_filenames: + ys2_filenames = fileio.sort_nicely(ys2_filenames) + ys2_dfs = [] + for ys2_filename in ys2_filenames: + ys2_dfs.append(pd.DataFrame(fileio.load(ys2_filename))) + ys2_dfs = pd.concat(ys2_dfs, ignore_index=True, axis=1) + fileio.save(ys2_dfs, processing_dir + 'ys2' + outputsuffix + + file_extentions) + del ys2_dfs + + rmse_filenames = glob.glob(processing_dir + 'batch_*/' + 'RMSE' + + outputsuffix + '*') + if rmse_filenames: + rmse_filenames = fileio.sort_nicely(rmse_filenames) + rmse_dfs = [] + for rmse_filename in rmse_filenames: + rmse_dfs.append(pd.DataFrame(fileio.load(rmse_filename))) + rmse_dfs = pd.concat(rmse_dfs, ignore_index=True, axis=0) + fileio.save(rmse_dfs, processing_dir + 'RMSE' + outputsuffix + + file_extentions) + del rmse_dfs + + smse_filenames = glob.glob(processing_dir + 'batch_*/' + 'SMSE' + + outputsuffix + '*') + if smse_filenames: + smse_filenames = fileio.sort_nicely(smse_filenames) + smse_dfs = [] + for smse_filename in smse_filenames: + smse_dfs.append(pd.DataFrame(fileio.load(smse_filename))) + smse_dfs = pd.concat(smse_dfs, ignore_index=True, axis=0) + fileio.save(smse_dfs, processing_dir + 'SMSE' + outputsuffix + + file_extentions) + del smse_dfs - yhat_filenames = glob.glob(processing_dir + 'batch_*/' + 'yhat' + - outputsuffix + '*') - if yhat_filenames: - yhat_filenames = fileio.sort_nicely(yhat_filenames) - yhat_dfs = [] - for yhat_filename in yhat_filenames: - yhat_dfs.append(pd.DataFrame(fileio.load(yhat_filename))) - yhat_dfs = pd.concat(yhat_dfs, ignore_index=True, axis=1) - fileio.save(yhat_dfs, processing_dir + 'yhat' + outputsuffix + - file_extentions) - del yhat_dfs - - ys2_filenames = glob.glob(processing_dir + 'batch_*/' + 'ys2' + - outputsuffix + '*') - if ys2_filenames: - ys2_filenames = fileio.sort_nicely(ys2_filenames) - ys2_dfs = [] - for ys2_filename in ys2_filenames: - ys2_dfs.append(pd.DataFrame(fileio.load(ys2_filename))) - ys2_dfs = pd.concat(ys2_dfs, ignore_index=True, axis=1) - fileio.save(ys2_dfs, processing_dir + 'ys2' + outputsuffix + - file_extentions) - del ys2_dfs - - rmse_filenames = glob.glob(processing_dir + 'batch_*/' + 'RMSE' + - outputsuffix + '*') - if rmse_filenames: - rmse_filenames = fileio.sort_nicely(rmse_filenames) - rmse_dfs = [] - 
for rmse_filename in rmse_filenames: - rmse_dfs.append(pd.DataFrame(fileio.load(rmse_filename))) - rmse_dfs = pd.concat(rmse_dfs, ignore_index=True, axis=0) - fileio.save(rmse_dfs, processing_dir + 'RMSE' + outputsuffix + - file_extentions) - del rmse_dfs - - smse_filenames = glob.glob(processing_dir + 'batch_*/' + 'SMSE' + - outputsuffix + '*') - if smse_filenames: - smse_filenames = fileio.sort_nicely(smse_filenames) - smse_dfs = [] - for smse_filename in smse_filenames: - smse_dfs.append(pd.DataFrame(fileio.load(smse_filename))) - smse_dfs = pd.concat(smse_dfs, ignore_index=True, axis=0) - fileio.save(smse_dfs, processing_dir + 'SMSE' + outputsuffix + - file_extentions) - del smse_dfs + expv_filenames = glob.glob(processing_dir + 'batch_*/' + 'EXPV' + + outputsuffix + '*') + if expv_filenames: + expv_filenames = fileio.sort_nicely(expv_filenames) + expv_dfs = [] + for expv_filename in expv_filenames: + expv_dfs.append(pd.DataFrame(fileio.load(expv_filename))) + expv_dfs = pd.concat(expv_dfs, ignore_index=True, axis=0) + fileio.save(expv_dfs, processing_dir + 'EXPV' + outputsuffix + + file_extentions) + del expv_dfs - expv_filenames = glob.glob(processing_dir + 'batch_*/' + 'EXPV' + - outputsuffix + '*') - if expv_filenames: - expv_filenames = fileio.sort_nicely(expv_filenames) - expv_dfs = [] - for expv_filename in expv_filenames: - expv_dfs.append(pd.DataFrame(fileio.load(expv_filename))) - expv_dfs = pd.concat(expv_dfs, ignore_index=True, axis=0) - fileio.save(expv_dfs, processing_dir + 'EXPV' + outputsuffix + - file_extentions) - del expv_dfs + msll_filenames = glob.glob(processing_dir + 'batch_*/' + 'MSLL' + + outputsuffix + '*') + if msll_filenames: + msll_filenames = fileio.sort_nicely(msll_filenames) + msll_dfs = [] + for msll_filename in msll_filenames: + msll_dfs.append(pd.DataFrame(fileio.load(msll_filename))) + msll_dfs = pd.concat(msll_dfs, ignore_index=True, axis=0) + fileio.save(msll_dfs, processing_dir + 'MSLL' + outputsuffix + + file_extentions) + del msll_dfs - msll_filenames = glob.glob(processing_dir + 'batch_*/' + 'MSLL' + - outputsuffix + '*') - if msll_filenames: - msll_filenames = fileio.sort_nicely(msll_filenames) - msll_dfs = [] - for msll_filename in msll_filenames: - msll_dfs.append(pd.DataFrame(fileio.load(msll_filename))) - msll_dfs = pd.concat(msll_dfs, ignore_index=True, axis=0) - fileio.save(msll_dfs, processing_dir + 'MSLL' + outputsuffix + - file_extentions) - del msll_dfs + nll_filenames = glob.glob(processing_dir + 'batch_*/' + 'NLL' + + outputsuffix + '*') + if nll_filenames: + nll_filenames = fileio.sort_nicely(nll_filenames) + nll_dfs = [] + for nll_filename in nll_filenames: + nll_dfs.append(pd.DataFrame(fileio.load(nll_filename))) + nll_dfs = pd.concat(nll_dfs, ignore_index=True, axis=0) + fileio.save(nll_dfs, processing_dir + 'NLL' + outputsuffix + + file_extentions) + del nll_dfs + + bic_filenames = glob.glob(processing_dir + 'batch_*/' + 'BIC' + + outputsuffix + '*') + if bic_filenames: + bic_filenames = fileio.sort_nicely(bic_filenames) + bic_dfs = [] + for bic_filename in bic_filenames: + bic_dfs.append(pd.DataFrame(fileio.load(bic_filename))) + bic_dfs = pd.concat(bic_dfs, ignore_index=True, axis=0) + fileio.save(bic_dfs, processing_dir + 'BIC' + outputsuffix + + file_extentions) + del bic_dfs - if func != 'predict' and func != 'transfer': - if not os.path.isdir(processing_dir + 'Models') and \ - os.path.exists(os.path.join(batches[0], 'Models')): - os.mkdir(processing_dir + 'Models') + if func != 'predict' and func != 'extend': + if not 
os.path.isdir(processing_dir + 'Models') and \ + os.path.exists(os.path.join(batches[0], 'Models')): + os.mkdir(processing_dir + 'Models') - meta_filenames = glob.glob(processing_dir + 'batch_*/Models/' + 'meta_data.md') - mY = [] - sY = [] - mX = [] - sX = [] - if meta_filenames: - meta_filenames = fileio.sort_nicely(meta_filenames) - with open(meta_filenames[0], 'rb') as file: - meta_data = pickle.load(file) - if meta_data['standardize']: - for meta_filename in meta_filenames: - mY.append(meta_data['mean_resp']) - sY.append(meta_data['std_resp']) - mX.append(meta_data['mean_cov']) - sX.append(meta_data['std_cov']) - meta_data['mean_resp'] = np.stack(mY) - meta_data['std_resp'] = np.stack(sY) - meta_data['mean_cov'] = np.stack(mX) - meta_data['std_cov'] = np.stack(sX) - - with open(os.path.join(processing_dir, 'Models', 'meta_data.md'), - 'wb') as file: - pickle.dump(meta_data, file, protocol=PICKLE_PROTOCOL) + meta_filenames = glob.glob(processing_dir + 'batch_*/Models/' + + 'meta_data.md') + mY = [] + sY = [] + X_scalers = [] + Y_scalers = [] + if meta_filenames: + meta_filenames = fileio.sort_nicely(meta_filenames) + with open(meta_filenames[0], 'rb') as file: + meta_data = pickle.load(file) + + for meta_filename in meta_filenames: + with open(meta_filename, 'rb') as file: + meta_data = pickle.load(file) + mY.append(meta_data['mean_resp']) + sY.append(meta_data['std_resp']) + if meta_data['inscaler'] in ['standardize', 'minmax', + 'robminmax']: + X_scalers.append(meta_data['scaler_cov']) + if meta_data['outscaler'] in ['standardize', 'minmax', + 'robminmax']: + Y_scalers.append(meta_data['scaler_resp']) + meta_data['mean_resp'] = np.squeeze(np.stack(mY)) + meta_data['std_resp'] = np.squeeze(np.stack(sY)) + meta_data['scaler_cov'] = X_scalers + meta_data['scaler_resp'] = Y_scalers + + with open(os.path.join(processing_dir, 'Models', + 'meta_data.md'), 'wb') as file: + pickle.dump(meta_data, file, protocol=PICKLE_PROTOCOL) - batch_dirs = glob.glob(processing_dir + 'batch_*/') - if batch_dirs: - batch_dirs = fileio.sort_nicely(batch_dirs) - for b, batch_dir in enumerate(batch_dirs): - src_files = glob.glob(batch_dir + 'Models/NM*' + outputsuffix + '.pkl') - if src_files: - src_files = fileio.sort_nicely(src_files) - for f, full_file_name in enumerate(src_files): - if os.path.isfile(full_file_name): - file_name = full_file_name.split('/')[-1] - n = file_name.split('_') - n[-2] = str(b * batch_size + f) - n = '_'.join(n) - shutil.copy(full_file_name, processing_dir + 'Models/' + n) - elif func=='fit': - count = count+1 - batch1 = glob.glob(batch_dir + '/' + job_name + '*.sh') - print('Failed batch: ' + batch1[0]) - batch_fail.append(batch1) + batch_dirs = glob.glob(processing_dir + 'batch_*/') + if batch_dirs: + batch_dirs = fileio.sort_nicely(batch_dirs) + for b, batch_dir in enumerate(batch_dirs): + src_files = glob.glob(batch_dir + 'Models/NM*' + + outputsuffix + '.pkl') + if src_files: + src_files = fileio.sort_nicely(src_files) + for f, full_file_name in enumerate(src_files): + if os.path.isfile(full_file_name): + file_name = full_file_name.split('/')[-1] + n = file_name.split('_') + n[-2] = str(b * batch_size + f) + n = '_'.join(n) + shutil.copy(full_file_name, processing_dir + + 'Models/' + n) + elif func=='fit': + count = count+1 + batch1 = glob.glob(batch_dir + '/' + job_name + '*.sh') + print('Failed batch: ' + batch1[0]) + batch_fail.append(batch1) - # list batches that were not executed - print('Number of batches that failed:' + str(count)) - batch_fail_df = 
pd.DataFrame(batch_fail) - if file_extentions == '.txt': - fileio.save_pd(batch_fail_df, processing_dir + 'failed_batches'+ - file_extentions) - else: - fileio.save(batch_fail_df, processing_dir + - 'failed_batches' + - file_extentions) - - if not batch_fail: - return 1 - else: - return 0
- -
[docs]def delete_nm(processing_dir, - binary=False): - """This function deletes all processing for normative modelling and just - keeps the combined output. - - :Parameters: - * processing_dir -> Full path to the processing directory - * binary -> Results in pkl format? - - written by (primarily) T Wolfers, (adapted) SM Kia - """ + # list batches that were not executed + print('Number of batches that failed:' + str(count)) + batch_fail_df = pd.DataFrame(batch_fail) + if file_extentions == '.txt': + fileio.save_pd(batch_fail_df, processing_dir + 'failed_batches'+ + file_extentions) + else: + fileio.save(batch_fail_df, processing_dir + + 'failed_batches' + + file_extentions) + + if not batch_fail: + return 1 + else: + return 0
+ +
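Once the cluster jobs have finished, the batches can be checked and, if desired, merged in a single call. A sketch reusing the placeholder directory and job name from above:

```
# Sketch: check all batches and combine their outputs across batches.
from pcntoolkit.normative_parallel import collect_nm

ok = collect_nm('/project/nm_run/',
                job_name='nm_job',
                func='estimate',
                collect=True,          # False only checks for failed batches
                binary=False,
                outputsuffix='_estimate')
if ok == 0:
    print('Some batches failed; inspect failed_batches and consider rerun_nm.')
```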
[docs]def delete_nm(processing_dir, + binary=False): + """This function deletes all processing for normative modelling and just + keeps the combined output. + + :Parameters: + * processing_dir -> Full path to the processing directory + * binary -> Results in pkl format? + + written by (primarily) T Wolfers, (adapted) SM Kia + """ - if binary: - file_extentions = '.pkl' - else: - file_extentions = '.txt' - for file in glob.glob(processing_dir + 'batch_*/'): - shutil.rmtree(file) - if os.path.exists(processing_dir + 'failed_batches' + file_extentions): - os.remove(processing_dir + 'failed_batches' + file_extentions)
- - -# all routines below are envronment dependent and require adaptation in novel -# environments -> copy those routines and adapt them in accrodance with your -# environment - -
[docs]def bashwrap_nm(processing_dir, - python_path, - normative_path, - job_name, - covfile_path, - respfile_path, - func='estimate', - **kwargs): - - """ This function wraps normative modelling into a bash script to run it - on a torque cluster system. - - :Parameters: - * processing_dir -> Full path to the processing dir - * python_path -> Full path to the python distribution - * normative_path -> Full path to the normative.py - * job_name -> Name for the bash script that is the output of - this function - * covfile_path -> Full path to a .txt file that contains all - covariats (subjects x covariates) for the - responsefile - * respfile_path -> Full path to a .txt that contains all features - (subjects x features) - * cv_folds -> Number of cross validations - * testcovfile_path -> Full path to a .txt file that contains all - covariats (subjects x covariates) for the - testresponse file - * testrespfile_path -> Full path to a .txt file that contains all - test features - * alg -> which algorithm to use - * configparam -> configuration parameters for this algorithm - - :outputs: - * A bash.sh file containing the commands for normative modelling saved - to the processing directory (written to disk) - - written by (primarily) T Wolfers - """ + if binary: + file_extentions = '.pkl' + else: + file_extentions = '.txt' + for file in glob.glob(processing_dir + 'batch_*/'): + shutil.rmtree(file) + if os.path.exists(processing_dir + 'failed_batches' + file_extentions): + os.remove(processing_dir + 'failed_batches' + file_extentions)
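After the combined output has been written, the per-batch working folders can be cleaned up. A one-line sketch with the same placeholder directory:

```
# Sketch: remove the batch_* folders (and the failed_batches file, if present),
# keeping only the combined output in the processing directory.
from pcntoolkit.normative_parallel import delete_nm

delete_nm('/project/nm_run/', binary=False)
```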
+ + +# all routines below are environment dependent and require adaptation in novel +# environments -> copy those routines and adapt them in accordance with your +# environment + +
[docs]def bashwrap_nm(processing_dir, + python_path, + normative_path, + job_name, + covfile_path, + respfile_path, + func='estimate', + **kwargs): + + """ This function wraps normative modelling into a bash script to run it + on a torque cluster system. + + :Parameters: + * processing_dir -> Full path to the processing dir + * python_path -> Full path to the python distribution + * normative_path -> Full path to the normative.py + * job_name -> Name for the bash script that is the output of + this function + * covfile_path -> Full path to a .txt file that contains all + covariats (subjects x covariates) for the + responsefile + * respfile_path -> Full path to a .txt that contains all features + (subjects x features) + * cv_folds -> Number of cross validations + * testcovfile_path -> Full path to a .txt file that contains all + covariats (subjects x covariates) for the + testresponse file + * testrespfile_path -> Full path to a .txt file that contains all + test features + * alg -> which algorithm to use + * configparam -> configuration parameters for this algorithm + + :outputs: + * A bash.sh file containing the commands for normative modelling saved + to the processing directory (written to disk) + + written by (primarily) T Wolfers + """ - # here we use pop not get to remove the arguments as they used - cv_folds = kwargs.pop('cv_folds',None) - testcovfile_path = kwargs.pop('testcovfile_path', None) - testrespfile_path = kwargs.pop('testrespfile_path', None) - alg = kwargs.pop('alg', None) - configparam = kwargs.pop('configparam', None) - standardize = kwargs.pop('standardize', True) + # here we use pop not get to remove the arguments as they used + cv_folds = kwargs.pop('cv_folds',None) + testcovfile_path = kwargs.pop('testcovfile_path', None) + testrespfile_path = kwargs.pop('testrespfile_path', None) + alg = kwargs.pop('alg', None) + configparam = kwargs.pop('configparam', None) + standardize = kwargs.pop('standardize', True) - # change to processing dir - os.chdir(processing_dir) - output_changedir = ['cd ' + processing_dir + '\n'] - - bash_lines = '#!/bin/bash\n' - bash_cores = 'export OMP_NUM_THREADS=1\n' - bash_environment = [bash_lines + bash_cores] - - # creates call of function for normative modelling - if (testrespfile_path is not None) and (testcovfile_path is not None): - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -t ' + testcovfile_path + ' -r ' + - testrespfile_path + ' -f ' + func] - elif (testrespfile_path is None) and (testcovfile_path is not None): - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -t ' + testcovfile_path + ' -f ' + func] - elif cv_folds is not None: - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -k ' + str(cv_folds) + ' -f ' + func] - elif func != 'estimate': - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -f ' + func] - else: - raise(ValueError, """For 'estimate' function either testcov or cvfold - must be specified.""") + # change to processing dir + os.chdir(processing_dir) + output_changedir = ['cd ' + processing_dir + '\n'] + + bash_lines = '#!/bin/bash\n' + bash_cores = 'export OMP_NUM_THREADS=1\n' + bash_environment = [bash_lines + bash_cores] + + # creates call of function for normative modelling + if (testrespfile_path is not None) and (testcovfile_path is not None): + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -t ' + testcovfile_path + ' -r ' + + testrespfile_path + ' -f ' + func] + 
elif (testrespfile_path is None) and (testcovfile_path is not None): + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -t ' + testcovfile_path + ' -f ' + func] + elif cv_folds is not None: + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -k ' + str(cv_folds) + ' -f ' + func] + elif func != 'estimate': + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -f ' + func] + else: + raise(ValueError, """For 'estimate' function either testcov or cvfold + must be specified.""") - # add algorithm-specific parameters - if alg is not None: - job_call = [job_call[0] + ' -a ' + alg] - if configparam is not None: - job_call = [job_call[0] + ' -x ' + str(configparam)] + # add algorithm-specific parameters + if alg is not None: + job_call = [job_call[0] + ' -a ' + alg] + if configparam is not None: + job_call = [job_call[0] + ' -x ' + str(configparam)] - # add standardization flag if it is false - if not standardize: - job_call = [job_call[0] + ' -s'] + # add standardization flag if it is false + # if not standardize: + # job_call = [job_call[0] + ' -s'] - # add responses file - job_call = [job_call[0] + ' ' + respfile_path] + # add responses file + job_call = [job_call[0] + ' ' + respfile_path] - # add in optional arguments. - for k in kwargs: - job_call = [job_call[0] + ' ' + k + '=' + kwargs[k]] - - # writes bash file into processing dir - with open(processing_dir+job_name, 'w') as bash_file: - bash_file.writelines(bash_environment + output_changedir + \ - job_call + ["\n"]) - - # changes permissoins for bash.sh file - os.chmod(processing_dir + job_name, 0o700)
- - -
[docs]def qsub_nm(job_path, - log_path, - memory, - duration): - """ - This function submits a job.sh scipt to the torque custer using the qsub - command. - - ** Input: - * job_path -> Full path to the job.sh file - * memory -> Memory requirements written as string for example - 4gb or 500mb - * duation -> The approximate duration of the job, a string with - HH:MM:SS for example 01:01:01 - - ** Output: - * Submission of the job to the (torque) cluster - - witten by (primarily) T Wolfers, (adapted) SM Kia - """ - - # created qsub command - if log_path is None: - qsub_call = ['echo ' + job_path + ' | qsub -N ' + job_path + ' -l ' + - 'procs=1' + ',mem=' + memory + ',walltime=' + duration] - else: - qsub_call = ['echo ' + job_path + ' | qsub -N ' + job_path + - ' -l ' + 'procs=1' + ',mem=' + memory + ',walltime=' + duration + - ' -o ' + log_path + ' -e ' + log_path] - - # submits job to cluster - call(qsub_call, shell=True)
- - -
[docs]def rerun_nm(processing_dir, - log_path, - memory, - duration, - binary=False): - """ - This function reruns all failed batched in processing_dir after collect_nm - has identified he failed batches - - * Input: - * processing_dir -> Full path to the processing directory - * memory -> Memory requirements written as string - for example 4gb or 500mb - * duration -> The approximate duration of the job, a - string with HH:MM:SS for example 01:01:01 - - written by (primarily) T Wolfers, (adapted) SM Kia - """ - - if binary: - file_extentions = '.pkl' - failed_batches = fileio.load(processing_dir + - 'failed_batches' + file_extentions) - shape = failed_batches.shape - for n in range(0, shape[0]): - jobpath = failed_batches[n, 0] - print(jobpath) - qsub_nm(job_path=jobpath, - log_path=log_path, - memory=memory, - duration=duration) - else: - file_extentions = '.txt' - failed_batches = fileio.load_pd(processing_dir + - 'failed_batches' + file_extentions) - shape = failed_batches.shape - for n in range(0, shape[0]): - jobpath = failed_batches.iloc[n, 0] - print(jobpath) - qsub_nm(job_path=jobpath, - log_path=log_path, - memory=memory, - duration=duration)
- -# COPY the rotines above here and aadapt those to your cluster -# bashwarp_nm; qsub_nm; rerun_nm - -
[docs]def sbatchwrap_nm(processing_dir, - python_path, - normative_path, - job_name, - covfile_path, - respfile_path, - memory, - duration, - func='estimate', - **kwargs): - - """ This function wraps normative modelling into a bash script to run it - on a torque cluster system. - - :Parameters: - * processing_dir -> Full path to the processing dir - * python_path -> Full path to the python distribution - * normative_path -> Full path to the normative.py - * job_name -> Name for the bash script that is the output of - this function - * covfile_path -> Full path to a .txt file that contains all - covariats (subjects x covariates) for the - responsefile - * respfile_path -> Full path to a .txt that contains all features - (subjects x features) - * cv_folds -> Number of cross validations - * testcovfile_path -> Full path to a .txt file that contains all - covariats (subjects x covariates) for the - testresponse file - * testrespfile_path -> Full path to a .txt file that contains all - test features - * alg -> which algorithm to use - * configparam -> configuration parameters for this algorithm - - :outputs: - * A bash.sh file containing the commands for normative modelling saved - to the processing directory (written to disk) - - written by (primarily) T Wolfers - """ + # add in optional arguments. + for k in kwargs: + job_call = [job_call[0] + ' ' + k + '=' + kwargs[k]] + + # writes bash file into processing dir + with open(processing_dir+job_name, 'w') as bash_file: + bash_file.writelines(bash_environment + output_changedir + \ + job_call + ["\n"]) + + # changes permissoins for bash.sh file + os.chmod(processing_dir + job_name, 0o700)
+ + +
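`bashwrap_nm` is normally driven by `execute_nm`, but calling it directly can be useful to inspect the job script it writes. A sketch with placeholder paths; the `normative.py` location is taken from the installed package, as `execute_nm` does:

```
# Sketch: write a torque job script for one batch; all paths are placeholders.
import pcntoolkit as ptk
from pcntoolkit.normative_parallel import bashwrap_nm

batch_dir = '/project/nm_run/batch_1/'
bashwrap_nm(batch_dir,
            python_path='/opt/anaconda3/bin/python',
            normative_path=ptk.__path__[0] + '/normative.py',
            job_name='nm_job_1.sh',
            covfile_path='/project/nm_run/cov_train.txt',
            respfile_path=batch_dir + 'resp_batch_1.txt',
            func='estimate',
            testcovfile_path='/project/nm_run/cov_test.txt',
            testrespfile_path=batch_dir + 'testresp_batch_1.txt')
# The script is written to batch_dir + 'nm_job_1.sh' and made executable.
```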
[docs]def qsub_nm(job_path, + log_path, + memory, + duration): + """ + This function submits a job.sh scipt to the torque custer using the qsub + command. + + ** Input: + * job_path -> Full path to the job.sh file + * memory -> Memory requirements written as string for example + 4gb or 500mb + * duation -> The approximate duration of the job, a string with + HH:MM:SS for example 01:01:01 + + ** Output: + * Submission of the job to the (torque) cluster + + witten by (primarily) T Wolfers, (adapted) SM Kia + """ + + # created qsub command + if log_path is None: + qsub_call = ['echo ' + job_path + ' | qsub -N ' + job_path + ' -l ' + + 'procs=1' + ',mem=' + memory + ',walltime=' + duration] + else: + qsub_call = ['echo ' + job_path + ' | qsub -N ' + job_path + + ' -l ' + 'procs=1' + ',mem=' + memory + ',walltime=' + + duration + ' -o ' + log_path + ' -e ' + log_path] + + # submits job to cluster + call(qsub_call, shell=True)
+ + +
[docs]def rerun_nm(processing_dir, + log_path, + memory, + duration, + binary=False): + """ + This function reruns all failed batches in processing_dir after collect_nm + has identified the failed batches + + * Input: + * processing_dir -> Full path to the processing directory + * memory -> Memory requirements written as string + for example 4gb or 500mb + * duration -> The approximate duration of the job, a + string with HH:MM:SS for example 01:01:01 + + written by (primarily) T Wolfers, (adapted) SM Kia + """ + + if binary: + file_extentions = '.pkl' + failed_batches = fileio.load(processing_dir + + 'failed_batches' + file_extentions) + shape = failed_batches.shape + for n in range(0, shape[0]): + jobpath = failed_batches[n, 0] + print(jobpath) + qsub_nm(job_path=jobpath, + log_path=log_path, + memory=memory, + duration=duration) + else: + file_extentions = '.txt' + failed_batches = fileio.load_pd(processing_dir + + 'failed_batches' + file_extentions) + shape = failed_batches.shape + for n in range(0, shape[0]): + jobpath = failed_batches.iloc[n, 0] + print(jobpath) + qsub_nm(job_path=jobpath, + log_path=log_path, + memory=memory, + duration=duration)
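A hedged sketch of how ``rerun_nm`` might be called once ``collect_nm`` has written a ``failed_batches`` file into the processing directory (all paths are placeholders):

.. code-block:: python

    # Hypothetical example: resubmit only the batches that failed.
    # Assumes collect_nm has already written 'failed_batches.pkl'
    # (binary=True) into the processing directory.
    from pcntoolkit.normative_parallel import rerun_nm

    rerun_nm(processing_dir='/project/nm_processing/',
             log_path='/project/nm_processing/logs/',
             memory='4gb',
             duration='01:00:00',
             binary=True)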
+ +# COPY the routines above here and adapt them to your cluster +# bashwarp_nm; qsub_nm; rerun_nm + +
[docs]def sbatchwrap_nm(processing_dir, + python_path, + normative_path, + job_name, + covfile_path, + respfile_path, + memory, + duration, + func='estimate', + **kwargs): + + """ This function wraps normative modelling into a bash script to run it + on a torque cluster system. + + :Parameters: + * processing_dir -> Full path to the processing dir + * python_path -> Full path to the python distribution + * normative_path -> Full path to the normative.py + * job_name -> Name for the bash script that is the output of + this function + * covfile_path -> Full path to a .txt file that contains all + covariats (subjects x covariates) for the + responsefile + * respfile_path -> Full path to a .txt that contains all features + (subjects x features) + * cv_folds -> Number of cross validations + * testcovfile_path -> Full path to a .txt file that contains all + covariats (subjects x covariates) for the + testresponse file + * testrespfile_path -> Full path to a .txt file that contains all + test features + * alg -> which algorithm to use + * configparam -> configuration parameters for this algorithm + + :outputs: + * A bash.sh file containing the commands for normative modelling saved + to the processing directory (written to disk) + + written by (primarily) T Wolfers + """ - # here we use pop not get to remove the arguments as they used - cv_folds = kwargs.pop('cv_folds',None) - testcovfile_path = kwargs.pop('testcovfile_path', None) - testrespfile_path = kwargs.pop('testrespfile_path', None) - alg = kwargs.pop('alg', None) - configparam = kwargs.pop('configparam', None) - standardize = kwargs.pop('standardize', True) + # here we use pop not get to remove the arguments as they used + cv_folds = kwargs.pop('cv_folds',None) + testcovfile_path = kwargs.pop('testcovfile_path', None) + testrespfile_path = kwargs.pop('testrespfile_path', None) + alg = kwargs.pop('alg', None) + configparam = kwargs.pop('configparam', None) + standardize = kwargs.pop('standardize', True) - # change to processing dir - os.chdir(processing_dir) - output_changedir = ['cd ' + processing_dir + '\n'] - - sbatch_init='#!/bin/bash\n' - sbatch_jobname='#SBATCH --job-name=' + processing_dir + '\n' - sbatch_account='#SBATCH --account=p33_norment\n' - sbatch_nodes='#SBATCH --nodes=1\n' - sbatch_tasks='#SBATCH --ntasks=1\n' - sbatch_time='#SBATCH --time=' + str(duration) + '\n' - sbatch_memory='#SBATCH --mem-per-cpu=' + str(memory) + '\n' - sbatch_module='module purge\n' - sbatch_anaconda='module load anaconda3\n' - sbatch_exit='set -o errexit\n' - - #echo -n "This script is running on " - #hostname + # change to processing dir + os.chdir(processing_dir) + output_changedir = ['cd ' + processing_dir + '\n'] + + sbatch_init='#!/bin/bash\n' + sbatch_jobname='#SBATCH --job-name=' + processing_dir + '\n' + sbatch_account='#SBATCH --account=p33_norment\n' + sbatch_nodes='#SBATCH --nodes=1\n' + sbatch_tasks='#SBATCH --ntasks=1\n' + sbatch_time='#SBATCH --time=' + str(duration) + '\n' + sbatch_memory='#SBATCH --mem-per-cpu=' + str(memory) + '\n' + sbatch_module='module purge\n' + sbatch_anaconda='module load anaconda3\n' + sbatch_exit='set -o errexit\n' + + #echo -n "This script is running on " + #hostname - bash_environment = [sbatch_init + - sbatch_jobname + - sbatch_account + - sbatch_nodes + - sbatch_tasks + - sbatch_time + - sbatch_module + - sbatch_anaconda] - - # creates call of function for normative modelling - if (testrespfile_path is not None) and (testcovfile_path is not None): - job_call = [python_path + ' ' + normative_path + ' 
-c ' + - covfile_path + ' -t ' + testcovfile_path + ' -r ' + - testrespfile_path + ' -f ' + func] - elif (testrespfile_path is None) and (testcovfile_path is not None): - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -t ' + testcovfile_path + ' -f ' + func] - elif cv_folds is not None: - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -k ' + str(cv_folds) + ' -f ' + func] - elif func != 'estimate': - job_call = [python_path + ' ' + normative_path + ' -c ' + - covfile_path + ' -f ' + func] - else: - raise(ValueError, """For 'estimate' function either testcov or cvfold - must be specified.""") + bash_environment = [sbatch_init + + sbatch_jobname + + sbatch_account + + sbatch_nodes + + sbatch_tasks + + sbatch_time + + sbatch_module + + sbatch_anaconda] + + # creates call of function for normative modelling + if (testrespfile_path is not None) and (testcovfile_path is not None): + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -t ' + testcovfile_path + ' -r ' + + testrespfile_path + ' -f ' + func] + elif (testrespfile_path is None) and (testcovfile_path is not None): + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -t ' + testcovfile_path + ' -f ' + func] + elif cv_folds is not None: + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -k ' + str(cv_folds) + ' -f ' + func] + elif func != 'estimate': + job_call = [python_path + ' ' + normative_path + ' -c ' + + covfile_path + ' -f ' + func] + else: + raise(ValueError, """For 'estimate' function either testcov or cvfold + must be specified.""") - # add algorithm-specific parameters - if alg is not None: - job_call = [job_call[0] + ' -a ' + alg] - if configparam is not None: - job_call = [job_call[0] + ' -x ' + str(configparam)] + # add algorithm-specific parameters + if alg is not None: + job_call = [job_call[0] + ' -a ' + alg] + if configparam is not None: + job_call = [job_call[0] + ' -x ' + str(configparam)] - # add standardization flag if it is false - if not standardize: - job_call = [job_call[0] + ' -s'] + # add standardization flag if it is false + # if not standardize: + # job_call = [job_call[0] + ' -s'] - # add responses file - job_call = [job_call[0] + ' ' + respfile_path] + # add responses file + job_call = [job_call[0] + ' ' + respfile_path] - # add in optional arguments. - for k in kwargs: - job_call = [job_call[0] + ' ' + k + '=' + kwargs[k]] + # add in optional arguments. + for k in kwargs: + job_call = [job_call[0] + ' ' + k + '=' + kwargs[k]] - # writes bash file into processing dir - with open(processing_dir+job_name, 'w') as bash_file: - bash_file.writelines(bash_environment + output_changedir + \ - job_call + ["\n"] + [sbatch_exit]) + # writes bash file into processing dir + with open(processing_dir+job_name, 'w') as bash_file: + bash_file.writelines(bash_environment + output_changedir + \ + job_call + ["\n"] + [sbatch_exit]) - # changes permissoins for bash.sh file - os.chmod(processing_dir + job_name, 0o700)
+ # changes permissions for the bash.sh file + os.chmod(processing_dir + job_name, 0o700)
-
[docs]def sbatch_nm(job_path, - log_path): - """ - This function submits a job.sh scipt to the torque custer using the qsub - command. +
[docs]def sbatch_nm(job_path, + log_path): + """ + This function submits a job.sh scipt to the torque custer using the qsub + command. - ** Input: - * job_path -> Full path to the job.sh file - * log_path -> The logs are currently stored in the working dir + ** Input: + * job_path -> Full path to the job.sh file + * log_path -> The logs are currently stored in the working dir - ** Output: - * Submission of the job to the (torque) cluster + ** Output: + * Submission of the job to the (torque) cluster - witten by (primarily) T Wolfers - """ + witten by (primarily) T Wolfers + """ - # created qsub command - sbatch_call = ['sbatch ' + job_path] + # created qsub command + sbatch_call = ['sbatch ' + job_path] - # submits job to cluster - call(sbatch_call, shell=True) + # submits job to cluster + call(sbatch_call, shell=True) - def rerun_nm(processing_dir, - memory, - duration, - new_memory=False, - new_duration=False, - binary=False, - **kwargs): - """ - This function reruns all failed batched in processing_dir after collect_nm - has identified he failed batches - - * Input: - * processing_dir -> Full path to the processing directory - * memory -> Memory requirements written as string - for example 4gb or 500mb - * duration -> The approximate duration of the job, a - string with HH:MM:SS for example 01:01:01 - * new_memory -> If you want to change the memory - you have to indicate it here. - * new_duration -> If you want to change the duration - you have to indicate it here. - * Outputs: - * Reruns failed batches. - - written by (primarily) T Wolfers - """ - log_path = kwargs.pop('log_path', None) + def rerun_nm(processing_dir, + memory, + duration, + new_memory=False, + new_duration=False, + binary=False, + **kwargs): + """ + This function reruns all failed batched in processing_dir after + collect_nm has identified he failed batches - if binary: - file_extentions = '.pkl' - failed_batches = fileio.load(processing_dir + - 'failed_batches' + - file_extentions) - shape = failed_batches.shape - for n in range(0, shape[0]): - jobpath = failed_batches[n, 0] - print(jobpath) - if new_duration != False: - with fileinput.FileInput(jobpath, inplace=True) as file: - for line in file: - print(line.replace(duration, new_duration), end='') - if new_memory != False: - with fileinput.FileInput(jobpath, inplace=True) as file: - for line in file: - print(line.replace(memory, new_memory), end='') - sbatch_nm(jobpath, - log_path) - else: - file_extentions = '.txt' - failed_batches = fileio.load_pd(processing_dir + - 'failed_batches' + file_extentions) - shape = failed_batches.shape - for n in range(0, shape[0]): - jobpath = failed_batches.iloc[n, 0] - print(jobpath) - if new_duration != False: - with fileinput.FileInput(jobpath, inplace=True) as file: - for line in file: - print(line.replace(duration, new_duration), end='') - if new_memory != False: - with fileinput.FileInput(jobpath, inplace=True) as file: - for line in file: - print(line.replace(memory, new_memory), end='') - sbatch_nm(jobpath, - log_path)
+ * Input: + * processing_dir -> Full path to the processing directory + * memory -> Memory requirements written as string + for example 4gb or 500mb + * duration -> The approximate duration of the job, a + string with HH:MM:SS for example 01:01:01 + * new_memory -> If you want to change the memory + you have to indicate it here. + * new_duration -> If you want to change the duration + you have to indicate it here. + * Outputs: + * Reruns failed batches. + + written by (primarily) T Wolfers + """ + log_path = kwargs.pop('log_path', None) + + if binary: + file_extentions = '.pkl' + failed_batches = fileio.load(processing_dir + + 'failed_batches' + + file_extentions) + shape = failed_batches.shape + for n in range(0, shape[0]): + jobpath = failed_batches[n, 0] + print(jobpath) + if new_duration != False: + with fileinput.FileInput(jobpath, inplace=True) as file: + for line in file: + print(line.replace(duration, new_duration), end='') + if new_memory != False: + with fileinput.FileInput(jobpath, inplace=True) as file: + for line in file: + print(line.replace(memory, new_memory), end='') + sbatch_nm(jobpath, + log_path) + else: + file_extentions = '.txt' + failed_batches = fileio.load_pd(processing_dir + + 'failed_batches' + file_extentions) + shape = failed_batches.shape + for n in range(0, shape[0]): + jobpath = failed_batches.iloc[n, 0] + print(jobpath) + if new_duration != False: + with fileinput.FileInput(jobpath, inplace=True) as file: + for line in file: + print(line.replace(duration, new_duration), end='') + if new_memory != False: + with fileinput.FileInput(jobpath, inplace=True) as file: + for line in file: + print(line.replace(memory, new_memory), end='') + sbatch_nm(jobpath, + log_path)
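For SLURM-based clusters the same pattern applies: ``sbatchwrap_nm`` writes an sbatch script into the processing directory (note that the account and module-load lines are hard-coded in the script header shown above, so they will usually need to be adapted) and ``sbatch_nm`` submits it. A hedged sketch with placeholder paths:

.. code-block:: python

    # Hypothetical example: wrap a normative model run in an sbatch script
    # and submit it. All paths are placeholders.
    from pcntoolkit.normative_parallel import sbatchwrap_nm, sbatch_nm

    processing_dir = '/project/nm_processing/'
    job_name = 'nm_batch_1.sh'

    # write the sbatch script (here using 2-fold cross-validation and BLR)
    sbatchwrap_nm(processing_dir,
                  python_path='/home/user/miniconda3/bin/python',
                  normative_path='/home/user/PCNtoolkit/pcntoolkit/normative.py',
                  job_name=job_name,
                  covfile_path=processing_dir + 'cov.txt',
                  respfile_path=processing_dir + 'resp.txt',
                  memory='4gb',
                  duration='01:00:00',
                  cv_folds=2,
                  alg='blr')

    # submit the generated script to the SLURM queue
    sbatch_nm(job_path=processing_dir + job_name,
              log_path=None)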
-
+
+
+
+ +
+ +
+

+ © Copyright 2020, Andre F. Marquand. + +

+
- -
-
- - - + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_modules/rfa.html b/doc/build/html/_modules/rfa.html index f3be69e9..043ed317 100644 --- a/doc/build/html/_modules/rfa.html +++ b/doc/build/html/_modules/rfa.html @@ -1,331 +1,476 @@ - - - - - - - rfa — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + rfa — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -
-
-
-
+ + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + -import numpy as np -import torch -
[docs]class GPRRFA: - """Random Feature Approximation for Gaussian Process Regression - Estimation and prediction of Bayesian linear regression models - Basic usage:: - R = GPRRFA() - hyp = R.estimate(hyp0, X, y) - ys,s2 = R.predict(hyp, X, y, Xs) - where the variables are - :param hyp: vector of hyperparmaters. - :param X: N x D data array - :param y: 1D Array of targets (length N) - :param Xs: Nte x D array of test cases - :param hyp0: starting estimates for hyperparameter optimisation - :returns: * ys - predictive mean - * s2 - predictive variance - The hyperparameters are:: - hyp = [ log(sn), log(ell), log(sf) ] # hyp is a numpy array +
- where sn^2 is the noise variance, ell are lengthscale parameters and - sf^2 is the signal variance. This provides an approximation to the - covariance function:: - - k(x,z) = x'*z + sn2*exp(0.5*(x-z)'*Lambda*(x-z)) - - where Lambda = diag((ell_1^2, ... ell_D^2)) + - Written by A. Marquand - """ + +
+
+
+
+ +

Source code for rfa

+from __future__ import print_function
+from __future__ import division
 
-    def __init__(self, hyp=None, X=None, y=None, n_feat=None,
-                 n_iter=100, tol=1e-3, verbose=False):
+import numpy as np
+import torch
 
-        self.hyp = np.nan
-        self.nlZ = np.nan
-        self.tol = tol          # not used at present
-        self.Nf = n_feat
-        self.n_iter = n_iter
-        self.verbose = verbose
-        self._n_restarts = 5
+
[docs]class GPRRFA: + """Random Feature Approximation for Gaussian Process Regression - if (hyp is not None) and (X is not None) and (y is not None): - self.post(hyp, X, y) + Estimation and prediction of Bayesian linear regression models - def _numpy2torch(self, X, y=None, hyp=None): + Basic usage:: - if type(X) is torch.Tensor: - pass - elif type(X) is np.ndarray: - X = torch.from_numpy(X) - else: - raise(ValueError, 'Unknown data type (X)') - X = X.double() + R = GPRRFA() + hyp = R.estimate(hyp0, X, y) + ys,s2 = R.predict(hyp, X, y, Xs) + + where the variables are + + :param hyp: vector of hyperparmaters. + :param X: N x D data array + :param y: 1D Array of targets (length N) + :param Xs: Nte x D array of test cases + :param hyp0: starting estimates for hyperparameter optimisation + + :returns: * ys - predictive mean + * s2 - predictive variance + + The hyperparameters are:: + + hyp = [ log(sn), log(ell), log(sf) ] # hyp is a numpy array + + where sn^2 is the noise variance, ell are lengthscale parameters and + sf^2 is the signal variance. This provides an approximation to the + covariance function:: + + k(x,z) = x'*z + sn2*exp(0.5*(x-z)'*Lambda*(x-z)) + + where Lambda = diag((ell_1^2, ... ell_D^2)) + + Written by A. Marquand + """ + + def __init__(self, hyp=None, X=None, y=None, n_feat=None, + n_iter=100, tol=1e-3, verbose=False): + + self.hyp = np.nan + self.nlZ = np.nan + self.tol = tol # not used at present + self.Nf = n_feat + self.n_iter = n_iter + self.verbose = verbose + self._n_restarts = 5 + + if (hyp is not None) and (X is not None) and (y is not None): + self.post(hyp, X, y) + + def _numpy2torch(self, X, y=None, hyp=None): + + if type(X) is torch.Tensor: + pass + elif type(X) is np.ndarray: + X = torch.from_numpy(X) + else: + raise(ValueError, 'Unknown data type (X)') + X = X.double() - if y is not None: - if type(y) is torch.Tensor: - pass - elif type(y) is np.ndarray: - y = torch.from_numpy(y) - else: - raise(ValueError, 'Unknown data type (y)') + if y is not None: + if type(y) is torch.Tensor: + pass + elif type(y) is np.ndarray: + y = torch.from_numpy(y) + else: + raise(ValueError, 'Unknown data type (y)') - if len(y.shape) == 1: - y.resize_(y.shape[0],1) - y = y.double() + if len(y.shape) == 1: + y.resize_(y.shape[0],1) + y = y.double() - if hyp is not None: - if type(hyp) is torch.Tensor: - pass - else: - hyp = torch.tensor(hyp, requires_grad=True) + if hyp is not None: + if type(hyp) is torch.Tensor: + pass + else: + hyp = torch.tensor(hyp, requires_grad=True) - return X, y, hyp + return X, y, hyp -
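For reference, a short derivation of the random-feature construction implemented in ``post()`` below (this is an editorial reading of the code, not part of the original docstring). Each column of :math:`\Omega` is drawn as :math:`\omega_f \sim \mathcal{N}(0, \Lambda)` with :math:`\Lambda = \mathrm{diag}(\ell_1^2, \dots, \ell_D^2)`, and the feature map is

.. math::

   \phi(x) = \left[ \tfrac{s_f}{\sqrt{N_f}} \cos(x^\top \Omega),\;
                    \tfrac{s_f}{\sqrt{N_f}} \sin(x^\top \Omega),\; x \right].

Because :math:`\tfrac{1}{N_f}\sum_f \cos\!\big(\omega_f^\top (x-z)\big) \approx \mathbb{E}_\omega\!\left[\cos\!\big(\omega^\top (x-z)\big)\right] = \exp\!\big(-\tfrac{1}{2}(x-z)^\top \Lambda (x-z)\big)`, the implied covariance is approximately

.. math::

   k(x,z) \approx x^\top z + s_f^2 \exp\!\big(-\tfrac{1}{2}(x-z)^\top \Lambda (x-z)\big),

i.e. a linear term plus a squared-exponential term with amplitude :math:`s_f^2` and a negative exponent.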
[docs] def get_n_params(self, X): +
[docs] def get_n_params(self, X): - return X.shape[1] + 2
+ return X.shape[1] + 2
-
[docs] def post(self, hyp, X, y): - """ Generic function to compute posterior distribution. +
[docs] def post(self, hyp, X, y): + """ Generic function to compute posterior distribution. - This function will save the posterior mean and precision matrix as - self.m and self.A and will also update internal parameters (e.g. - N, D and the prior covariance (Sigma) and precision (iSigma). - """ + This function will save the posterior mean and precision matrix as + self.m and self.A and will also update internal parameters (e.g. + N, D and the prior covariance (Sigma) and precision (iSigma). + """ - # make sure all variables are the right type - X, y, hyp = self._numpy2torch(X, y, hyp) + # make sure all variables are the right type + X, y, hyp = self._numpy2torch(X, y, hyp) - self.N, self.Dx = X.shape + self.N, self.Dx = X.shape - # ensure the number of features is specified (use 75% as a default) - if self.Nf is None: - self.Nf = int(0.75 * self.N) + # ensure the number of features is specified (use 75% as a default) + if self.Nf is None: + self.Nf = int(0.75 * self.N) - self.Omega = torch.zeros((self.Dx, self.Nf), dtype=torch.double) - for f in range(self.Nf): - self.Omega[:,f] = torch.exp(hyp[1:-1]) * \ - torch.randn((self.Dx, 1), dtype=torch.double).squeeze() - - XO = torch.mm(X, self.Omega) - self.Phi = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \ - torch.cat((torch.cos(XO), torch.sin(XO)), 1) + self.Omega = torch.zeros((self.Dx, self.Nf), dtype=torch.double) + for f in range(self.Nf): + self.Omega[:,f] = torch.exp(hyp[1:-1]) * \ + torch.randn((self.Dx, 1), dtype=torch.double).squeeze() + + XO = torch.mm(X, self.Omega) + self.Phi = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \ + torch.cat((torch.cos(XO), torch.sin(XO)), 1) - # concatenate linear weights - self.Phi = torch.cat((self.Phi, X), 1) - self.D = self.Phi.shape[1] + # concatenate linear weights + self.Phi = torch.cat((self.Phi, X), 1) + self.D = self.Phi.shape[1] - if self.verbose: - print("estimating posterior ... | hyp=", hyp) + if self.verbose: + print("estimating posterior ... | hyp=", hyp) - self.A = torch.mm(torch.t(self.Phi), self.Phi) / torch.exp(2*hyp[0]) + \ - torch.eye(self.D, dtype=torch.double) - self.m = torch.mm(torch.solve(torch.t(self.Phi), self.A)[0], y) / \ - torch.exp(2*hyp[0]) + self.A = torch.mm(torch.t(self.Phi), self.Phi) / torch.exp(2*hyp[0]) + \ + torch.eye(self.D, dtype=torch.double) + self.m = torch.mm(torch.solve(torch.t(self.Phi), self.A)[0], y) / \ + torch.exp(2*hyp[0]) - # save hyperparameters - self.hyp = hyp + # save hyperparameters + self.hyp = hyp - # update optimizer iteration count - if hasattr(self,'_iterations'): - self._iterations += 1
- -
[docs] def loglik(self, hyp, X, y): - """ Function to compute compute log (marginal) likelihood """ - X, y, hyp = self._numpy2torch(X, y, hyp) - - # always recompute the posterior - self.post(hyp, X, y) - - #logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A)))) - try: - # compute the log determinants in a numerically stable way - logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A)))) - except Exception as e: - print("Warning: Estimation of posterior distribution failed") - print(e) - #nlZ = torch.tensor(1/np.finfo(float).eps) - nlZ = torch.tensor(np.nan) - self._optim_failed = True - return nlZ + # update optimizer iteration count + if hasattr(self,'_iterations'): + self._iterations += 1
+ +
[docs] def loglik(self, hyp, X, y): + """ Function to compute compute log (marginal) likelihood """ + X, y, hyp = self._numpy2torch(X, y, hyp) + + # always recompute the posterior + self.post(hyp, X, y) + + #logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A)))) + try: + # compute the log determinants in a numerically stable way + logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A)))) + except Exception as e: + print("Warning: Estimation of posterior distribution failed") + print(e) + #nlZ = torch.tensor(1/np.finfo(float).eps) + nlZ = torch.tensor(np.nan) + self._optim_failed = True + return nlZ - # compute negative marginal log likelihood - nlZ = -0.5 * (self.N*torch.log(1/torch.exp(2*hyp[0])) - - self.N*np.log(2*np.pi) - - torch.mm(torch.t(y - torch.mm(self.Phi,self.m)), - (y - torch.mm(self.Phi,self.m))) / - torch.exp(2*hyp[0]) - - torch.mm(torch.t(self.m), self.m) - logdetA) + # compute negative marginal log likelihood + nlZ = -0.5 * (self.N*torch.log(1/torch.exp(2*hyp[0])) - + self.N*np.log(2*np.pi) - + torch.mm(torch.t(y - torch.mm(self.Phi,self.m)), + (y - torch.mm(self.Phi,self.m))) / + torch.exp(2*hyp[0]) - + torch.mm(torch.t(self.m), self.m) - logdetA) - if self.verbose: - print("nlZ= ", nlZ, " | hyp=", hyp) + if self.verbose: + print("nlZ= ", nlZ, " | hyp=", hyp) - # save marginal likelihood - self.nlZ = nlZ - return nlZ
+ # save marginal likelihood + self.nlZ = nlZ + return nlZ
-
[docs] def dloglik(self, hyp, X, y): - """ Function to compute derivatives """ +
[docs] def dloglik(self, hyp, X, y): + """ Function to compute derivatives """ - print("derivatives not available") + print("derivatives not available") - return
+ return
-
[docs] def estimate(self, hyp0, X, y, optimizer='lbfgs'): - """ Function to estimate the model """ +
[docs] def estimate(self, hyp0, X, y, optimizer='lbfgs'): + """ Function to estimate the model """ - if type(hyp0) is torch.Tensor: - hyp = hyp0 - hyp0.requires_grad_() - else: - hyp = torch.tensor(hyp0, requires_grad=True) - # save the starting values - self.hyp0 = hyp + if type(hyp0) is torch.Tensor: + hyp = hyp0 + hyp0.requires_grad_() + else: + hyp = torch.tensor(hyp0, requires_grad=True) + # save the starting values + self.hyp0 = hyp - if optimizer.lower() == 'lbfgs': - opt = torch.optim.LBFGS([hyp]) - else: - raise(ValueError, "Optimizer " + " not implemented") - self._iterations = 0 + if optimizer.lower() == 'lbfgs': + opt = torch.optim.LBFGS([hyp]) + else: + raise(ValueError, "Optimizer " + " not implemented") + self._iterations = 0 - def closure(): - opt.zero_grad() - nlZ = self.loglik(hyp, X, y) - if not torch.isnan(nlZ): - nlZ.backward() - return nlZ + def closure(): + opt.zero_grad() + nlZ = self.loglik(hyp, X, y) + if not torch.isnan(nlZ): + nlZ.backward() + return nlZ - for r in range(self._n_restarts): - self._optim_failed = False + for r in range(self._n_restarts): + self._optim_failed = False - nlZ = opt.step(closure) + nlZ = opt.step(closure) - if self._optim_failed: - print("optimization failed. retrying (", r+1, "of", - self._n_restarts,")") - hyp = torch.randn_like(hyp, requires_grad=True) - self.hyp0 = hyp - else: - print("Optimzation complete after", self._iterations, - "evaluations. Function value =", - nlZ.detach().numpy().squeeze()) - break - - return self.hyp.detach().numpy()
- -
[docs] def predict(self, hyp, X, y, Xs): - """ Function to make predictions from the model """ - - X, y, hyp = self._numpy2torch(X, y, hyp) - Xs, *_ = self._numpy2torch(Xs) - - if (hyp != self.hyp).all() or not(hasattr(self, 'A')): - self.post(hyp, X, y) + if self._optim_failed: + print("optimization failed. retrying (", r+1, "of", + self._n_restarts,")") + hyp = torch.randn_like(hyp, requires_grad=True) + self.hyp0 = hyp + else: + print("Optimzation complete after", self._iterations, + "evaluations. Function value =", + nlZ.detach().numpy().squeeze()) + break + + return self.hyp.detach().numpy()
+ +
[docs] def predict(self, hyp, X, y, Xs): + """ Function to make predictions from the model """ + + X, y, hyp = self._numpy2torch(X, y, hyp) + Xs, *_ = self._numpy2torch(Xs) + + if (hyp != self.hyp).all() or not(hasattr(self, 'A')): + self.post(hyp, X, y) - # generate prediction tensors - XsO = torch.mm(Xs, self.Omega) - Phis = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \ - torch.cat((torch.cos(XsO), torch.sin(XsO)), 1) - # add linear component - Phis = torch.cat((Phis, Xs), 1) + # generate prediction tensors + XsO = torch.mm(Xs, self.Omega) + Phis = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \ + torch.cat((torch.cos(XsO), torch.sin(XsO)), 1) + # add linear component + Phis = torch.cat((Phis, Xs), 1) - ys = torch.mm(Phis, self.m) + ys = torch.mm(Phis, self.m) - # compute diag(Phis*(Phis'\A)) avoiding computing off-diagonal entries - s2 = torch.exp(2*hyp[0]) + \ - torch.sum(Phis * torch.t(torch.solve(torch.t(Phis), self.A)[0]), 1) + # compute diag(Phis*(Phis'\A)) avoiding computing off-diagonal entries + s2 = torch.exp(2*hyp[0]) + \ + torch.sum(Phis * torch.t(torch.solve(torch.t(Phis), self.A)[0]), 1) - # return output as numpy arrays - return ys.detach().numpy().squeeze(), s2.detach().numpy().squeeze()
+ # return output as numpy arrays + return ys.detach().numpy().squeeze(), s2.detach().numpy().squeeze()
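A hedged end-to-end sketch of the class on synthetic data (the import path and all numeric values are illustrative assumptions, not taken from the source):

.. code-block:: python

    # Hypothetical example: fit the random feature approximation on toy data.
    import numpy as np
    from pcntoolkit.model.rfa import GPRRFA   # import path is an assumption

    N, D = 200, 2
    X = np.random.randn(N, D)
    y = np.sin(X[:, 0]) + 0.1 * np.random.randn(N)
    Xs = np.random.randn(50, D)               # test inputs

    R = GPRRFA(n_feat=100)
    hyp0 = np.zeros(R.get_n_params(X))        # [log(sn), log(ell_1..ell_D), log(sf)]
    hyp = R.estimate(hyp0, X, y)              # optimise hyperparameters with LBFGS
    ys, s2 = R.predict(hyp, X, y, Xs)         # predictive mean and variance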
-
+
+
+
+ +
+ +
+

+ © Copyright 2020, Andre F. Marquand. + +

+
- -
-
- - - + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_modules/trendsurf.html b/doc/build/html/_modules/trendsurf.html index 168c8450..cd5f8a83 100644 --- a/doc/build/html/_modules/trendsurf.html +++ b/doc/build/html/_modules/trendsurf.html @@ -1,341 +1,486 @@ - - - - - - - trendsurf — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + trendsurf — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -
-
-
-
+ + + + + + +
+ +
- - - - + + + +
+ + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_sources/_templates/class.rst.txt b/doc/build/html/_sources/_templates/class.rst.txt new file mode 100644 index 00000000..e9885ddb --- /dev/null +++ b/doc/build/html/_sources/_templates/class.rst.txt @@ -0,0 +1,12 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :no-members: + :no-inherited-members: + +.. raw:: html + +
\ No newline at end of file diff --git a/doc/build/html/_sources/_templates/function.rst.txt b/doc/build/html/_sources/_templates/function.rst.txt new file mode 100644 index 00000000..4e6d1428 --- /dev/null +++ b/doc/build/html/_sources/_templates/function.rst.txt @@ -0,0 +1,12 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autofunction:: {{ objname }} + +.. .. include:: modules/{{ module }}.{{ objname }}.examples + +.. raw:: html + +
\ No newline at end of file diff --git a/doc/build/html/_sources/index.rst.txt b/doc/build/html/_sources/index.rst.txt index ac5f5872..0e08e020 100644 --- a/doc/build/html/_sources/index.rst.txt +++ b/doc/build/html/_sources/index.rst.txt @@ -1,22 +1,50 @@ -.. Spatial methods for neuroimaging documentation master file, created by - sphinx-quickstart on Tue Aug 23 15:22:22 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. Predictive Clinical Neuroscience toolkit ======================================== -Contents: .. toctree:: - :maxdepth: 2 + :maxdepth: 1 + :caption: Getting started + + pages/installation.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Background + + pages/pcntoolkit_background.rst +.. toctree:: + :maxdepth: 1 + :caption: Function & Class Docs + modindex.rst -Indices and tables -================== +.. toctree:: + :maxdepth: 1 + :caption: Current Events + + pages/updates.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Tutorials + + pages/tutorial_CPC2020.rst + pages/tutorial_ROIcorticalthickness.rst + pages/tutorial_HBR.rst + -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` +.. toctree:: + :maxdepth: 1 + :caption: Other Useful Stuff + pages/FAQs.rst + pages/glossary.rst + pages/citing.rst + pages/references.rst + pages/acknowledgements.rst + diff --git a/doc/build/html/_sources/index.txt b/doc/build/html/_sources/index.txt deleted file mode 100644 index c3b48855..00000000 --- a/doc/build/html/_sources/index.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Spatial methods for neuroimaging documentation master file, created by - sphinx-quickstart on Tue Aug 23 15:22:22 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Spatial methods for neuroimaging -================================ - -Contents: - -.. toctree:: - :maxdepth: 2 - - modindex.rst - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/build/html/_sources/modindex.rst.txt b/doc/build/html/_sources/modindex.rst.txt index b44c2ac5..dd73caf6 100644 --- a/doc/build/html/_sources/modindex.rst.txt +++ b/doc/build/html/_sources/modindex.rst.txt @@ -43,8 +43,8 @@ Module Index :inherited-members: :show-inheritance: -.. automodule:: utils +.. automodule:: util :members: :undoc-members: :inherited-members: - :show-inheritance: \ No newline at end of file + :show-inheritance: diff --git a/doc/build/html/_sources/modindex.txt b/doc/build/html/_sources/modindex.txt deleted file mode 100644 index 8069db4a..00000000 --- a/doc/build/html/_sources/modindex.txt +++ /dev/null @@ -1,14 +0,0 @@ -Module Index -************ - -.. automodule:: bayesreg - :members: - :undoc-members: - :inherited-members: - :show-inheritance: - -.. automodule:: gp - :members: - :undoc-members: - :inherited-members: - :show-inheritance: diff --git a/doc/build/html/_sources/pages/FAQs.rst.txt b/doc/build/html/_sources/pages/FAQs.rst.txt new file mode 100644 index 00000000..3f846e76 --- /dev/null +++ b/doc/build/html/_sources/pages/FAQs.rst.txt @@ -0,0 +1,3 @@ +Frequently Asked Questions +==================================== + diff --git a/doc/build/html/_sources/pages/_templates/class.rst.txt b/doc/build/html/_sources/pages/_templates/class.rst.txt new file mode 100644 index 00000000..e9885ddb --- /dev/null +++ b/doc/build/html/_sources/pages/_templates/class.rst.txt @@ -0,0 +1,12 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. 
autoclass:: {{ objname }} + :no-members: + :no-inherited-members: + +.. raw:: html + +
\ No newline at end of file diff --git a/doc/build/html/_sources/pages/_templates/function.rst.txt b/doc/build/html/_sources/pages/_templates/function.rst.txt new file mode 100644 index 00000000..4e6d1428 --- /dev/null +++ b/doc/build/html/_sources/pages/_templates/function.rst.txt @@ -0,0 +1,12 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autofunction:: {{ objname }} + +.. .. include:: modules/{{ module }}.{{ objname }}.examples + +.. raw:: html + +
\ No newline at end of file diff --git a/doc/build/html/_sources/pages/acknowledgements.rst.txt b/doc/build/html/_sources/pages/acknowledgements.rst.txt new file mode 100644 index 00000000..84d9b871 --- /dev/null +++ b/doc/build/html/_sources/pages/acknowledgements.rst.txt @@ -0,0 +1,14 @@ +Acknowledgements +================== + +We gratefully acknowledge funding from the Dutch Organisation for Scientific Research (NWO), via a Vernieuwingsimpuls VIDI fellowship, from the UK Wellcome Trust via a Digital Innovator grant and from the UK Medical Research Council via an Experimental Medicine Challenge Grant. + +Core developers of the toolbox are: + +- Andre Marquand +- Seyed Mostafa Kia +- Thomas Wolfers +- Saige Rutherford +- Richard Dinga +- Mariam Zabihi +- Charlotte Fraza diff --git a/doc/build/html/_sources/pages/citing.rst.txt b/doc/build/html/_sources/pages/citing.rst.txt new file mode 100644 index 00000000..fa9930d3 --- /dev/null +++ b/doc/build/html/_sources/pages/citing.rst.txt @@ -0,0 +1,31 @@ +How to cite PCNtoolkit +==================================== + +If you use the PCNtoolkit, please consider citing some of the following work: + + +Marquand, A. F., Wolfers, T., Mennes, M., Buitelaar, J., & Beckmann, C. F. (2016). Beyond Lumping and Splitting: A Review of Computational Approaches for Stratifying Psychiatric Disorders. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 1(5), 433–447. https://doi.org/10.1016/j.bpsc.2016.04.002 + +Marquand, A. F., Rezek, I., Buitelaar, J., & Beckmann, C. F. (2016). Understanding Heterogeneity in Clinical Cohorts Using Normative Models: Beyond Case-Control Studies. Biological Psychiatry, 80(7), 552–561. https://doi.org/10.1016/j.biopsych.2015.12.023 + +Marquand, A. F., Kia, S. M., Zabihi, M., Wolfers, T., Buitelaar, J. K., & Beckmann, C. F. (2019). Conceptualizing mental disorders as deviations from normative functioning. Molecular Psychiatry, 24(10), 1415–1424. https://doi.org/10.1038/s41380-019-0441-1 + +Marquand, A. F., Haak, K. V., & Beckmann, C. F. (2017). Functional corticostriatal connection topographies predict goal directed behaviour in humans. Nature Human Behaviour, 1(8). https://doi.org/10.1038/s41562-017-0146 + +Wolfers, T., Beckmann, C. F., Hoogman, M., Buitelaar, J. K., Franke, B., & Marquand, A. F. (2020). Individual differences v. the average patient: Mapping the heterogeneity in ADHD using normative models. Psychological Medicine, 50(2), 314–323. https://doi.org/10.1017/S0033291719000084 + +Wolfers, T., Rokicki, J., Alnæs, D., Berthet, P., Agartz, I., Kia, S. M., Kaufmann, T., Zabihi, M., Moberget, T., Melle, I., Beckmann, C. F., Andreassen, O. A., Marquand, A. F., & Westlye, L. T. (n.d.). Replicating extensive brain structural heterogeneity in individuals with schizophrenia and bipolar disorder. Human Brain Mapping, n/a(n/a). https://doi.org/10.1002/hbm.25386 + +Zabihi, M., Floris, D. L., Kia, S. M., Wolfers, T., Tillmann, J., Arenas, A. L., Moessnang, C., Banaschewski, T., Holt, R., Baron-Cohen, S., Loth, E., Charman, T., Bourgeron, T., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. (2020). Fractionating autism based on neuroanatomical normative modeling. Translational Psychiatry, 10(1), 1–10. https://doi.org/10.1038/s41398-020-01057-0 + +Zabihi, M., Oldehinkel, M., Wolfers, T., Frouin, V., Goyard, D., Loth, E., Charman, T., Tillmann, J., Banaschewski, T., Dumas, G., Holt, R., Baron-Cohen, S., Durston, S., Bölte, S., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. 
F. (2019). Dissecting the Heterogeneous Cortical Anatomy of Autism Spectrum Disorder Using Normative Models. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 4(6), 567–578. https://doi.org/10.1016/j.bpsc.2018.11.013 + +Kia, S. M., & Marquand, A. (2018). Normative Modeling of Neuroimaging Data using Scalable Multi-Task Gaussian Processes. ArXiv:1806.01047 [Cs, Stat]. http://arxiv.org/abs/1806.01047 + +Kia, S. M., Beckmann, C. F., & Marquand, A. F. (2018). Scalable Multi-Task Gaussian Process Tensor Regression for Normative Modeling of Structured Variation in Neuroimaging Data. ArXiv:1808.00036 [Cs, Stat]. http://arxiv.org/abs/1808.00036 + +Kia, S. M., Huijsdens, H., Dinga, R., Wolfers, T., Mennes, M., Andreassen, O. A., Westlye, L. T., Beckmann, C. F., & Marquand, A. F. (2020). Hierarchical Bayesian Regression for Multi-site Normative Modeling of Neuroimaging Data. In A. L. Martel, P. Abolmaesumi, D. Stoyanov, D. Mateus, M. A. Zuluaga, S. K. Zhou, D. Racoceanu, & L. Joskowicz (Eds.), Medical Image Computing and Computer Assisted Intervention – MICCAI 2020 (pp. 699–709). Springer International Publishing. https://doi.org/10.1007/978-3-030-59728-3_68 + +Huertas, I., Oldehinkel, M., van Oort, E. S. B., Garcia-Solis, D., Mir, P., Beckmann, C. F., & Marquand, A. F. (2017). A Bayesian spatial model for neuroimaging data based on biologically informed basis functions. NeuroImage, 161, 134–148. https://doi.org/10.1016/j.neuroimage.2017.08.009 + +Fraza, C. J., Dinga, R., Beckmann, C. F., & Marquand, A. F. (2021). Warped Bayesian Linear Regression for Normative Modelling of Big Data. BioRxiv, 2021.04.05.438429. https://doi.org/10.1101/2021.04.05.438429 diff --git a/doc/build/html/_sources/pages/glossary.rst.txt b/doc/build/html/_sources/pages/glossary.rst.txt new file mode 100644 index 00000000..4ed67d03 --- /dev/null +++ b/doc/build/html/_sources/pages/glossary.rst.txt @@ -0,0 +1,2 @@ +Glossary +=========== diff --git a/doc/build/html/_sources/pages/installation.rst.txt b/doc/build/html/_sources/pages/installation.rst.txt new file mode 100644 index 00000000..98c5c9fb --- /dev/null +++ b/doc/build/html/_sources/pages/installation.rst.txt @@ -0,0 +1,100 @@ +Installation +================== + +Basic installation (on a local machine) +******************************************* + +1. Install anaconda3 + +2. Create enviornment + +.. code-block:: bash + + conda create --name + +3. Activate environment + +.. code-block:: bash + + source activate + +4. Install required conda packages + +.. code-block:: bash + + conda install pip pandas scipy + +5. Install PCNtoolkit (plus dependencies) + +.. code-block:: bash + + pip install pcntoolkit + +Alternative installation (on a shared resource) +************************************************** + +1. Make sure conda is available on the system. Otherwise install it first from https://www.anaconda.com/ + +.. code-block:: bash + + conda --version + + +2. Create a conda environment in a shared location + +.. code-block:: bash + + conda create -y python==3.7.7 numpy mkl blas --prefix=/shared/conda/ + + +3. Activate the conda environment + +.. code-block:: bash + + conda activate /shared/conda/ + + +4. Install other dependencies + +.. code-block:: bash + + conda install -y pandas scipy + + +5. Install pip dependencies + +.. code-block:: bash + + pip --no-cache-dir install nibabel sklearn torch glob3 + + +6. Clone the repo + +.. code-block:: bash + + git clone https://github.com/amarquand/PCNtoolkit.git + + +7. Install in the conda environment + +.. 
code-block:: bash + + cd PCNtoolkit/ + python3 setup.py install + + +8. Test + +.. code-block:: bash + + python -c "import pcntoolkit as pk;print(pk.__file__)" + + +Quickstart usage +**************************** + +For normative modelling, functionality is handled by the ``normative.py`` script, which can be run from the command line, e.g. + +.. code-block:: bash + + python normative.py -c /path/to/training/covariates -t /path/to/test/covariates -r /path/to/test/response/variables /path/to/my/training/response/variables diff --git a/doc/build/html/_sources/pages/pcntoolkit_background.rst.txt b/doc/build/html/_sources/pages/pcntoolkit_background.rst.txt new file mode 100644 index 00000000..4f398e24 --- /dev/null +++ b/doc/build/html/_sources/pages/pcntoolkit_background.rst.txt @@ -0,0 +1,178 @@ +PCNtoolkit Background +==================================== + +What is the PCNtoolkit? +**************************** + +Predictive Clinical Neuroscience (PCN) toolkit (formerly nispat) is a python package designed for multi-purpose tasks in clinical neuroimaging, including normative modelling, trend surface modelling in addition to providing implementations of a number of fundamental machine learning algorithms. + +Intro to normative modelling +=============================== + +Normative modelling essentially aims to predict centiles of variance in a response variable (e.g. a region of interest or other neuroimaging-derived measure) on the basis of a set of covariates (e.g. age, clinical scores, diagnosis) A conceptual overview of the approach can be found in this `publication `_. For example, the image below shows an example of a normative model that aims to predict vertex-wise cortical thickness data, essentially fitting a separate model for each vertex. + +.. figure:: ./nm_concept.png + :height: 300px + :align: center + +In practice, this is done by regressing the biological response variables against a set of clinical or demographic covariates. In the instructions that follow, it is helpful to think of these as being stored in matrices as shown below: + +.. figure:: ./nm_overview.png + :height: 300px + :align: center + +There are many options for this, but techniques that provide a distributional form for the centiles are appealing, since they help to estimate extreme centiles more efficiently. Bayesian methods are also beneficial in this regard because they also allow separation of modelling uncertainty from variation in the data. Many applications of normative modelling use Gaussian Process Regression, which is the default method in this toolkit. Typically (but not `always `_), each response variable is estimated independently. + +Data formats +**************************** + +Generally the covariates are specified in text format, roughly following the FSL convention in that the text file should contain one entry +(i.e. subject) per line, with columns space or tab separated and no headers. For example: + +.. code-block:: bash + + head cov.txt + 52 55 94 4.6 + 49 43 59 4.6 + 56 80 63 5.6 + 39 48 42 4.3 + + +For the response variables, the following data formats are supported: + +* NIfTI (e.g. .nii.gz or .img/.hdr) +* CIFTI (e.g. .dtseries.nii) +* Pickle/pandas (e.g. .pkl) +* ASCII text (e.g. .txt, .csv, .tsv) + +For nifti/cifti formats, data should be in timeseries format with subjects along the time dimension and these images will be masked and reshaped into vectors. If no mask is specified, one will be created automatically from the image data. 
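As a concrete illustration of these formats, the snippet below writes a small covariate file in the whitespace-separated text layout shown above, and a response file in pickle/pandas format. The assumption that the pickle should hold a subjects x features pandas DataFrame is an editorial reading of the description above, so check it against your own data before relying on it.

.. code-block:: python

    # Hypothetical example of preparing covariate and response files.
    import numpy as np
    import pandas as pd

    n_sub, n_feat = 100, 10
    cov = np.column_stack([np.random.uniform(18, 80, n_sub),   # e.g. age
                           np.random.randint(0, 2, n_sub)])    # e.g. sex
    resp = np.random.randn(n_sub, n_feat)                      # e.g. ROI measures

    np.savetxt('cov.txt', cov)                  # space separated, no header
    pd.DataFrame(resp).to_pickle('resp.pkl')    # pickle/pandas format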
+ +Basic usage (command line) +**************************** + +The simplest method to estimate a normative model is using the ``normative.py`` script which can be run from the command line or imported as a python module. For example, the following command will estimate a normative model on the basis of the matrix of covariates and responses specified in cov.txt and resp.txt respectively. These are simply tab or space separated ASCII text files that contain the variables of interest, with one subject per row. + +.. code-block:: bash + + python normative.py -c cov.txt -k 5 -a blr resp.txt + + +The argument ``-a blr`` tells the script to use Bayesian Linear regression rather than the default Gaussian process regression model and ``-k 5`` tells the script to run internal 5-fold cross-validation across all subjects in the covariates and responses files. Alternatively, the model can be evaluated on a separate dataset by specifying test covariates (and optionally also test responses). +The following estimation algorithms are supported + +**Table 1: Estimation algorithms** + +================= ================================= ============================================================================================= +**key value** **Description** **Reference** +----------------- --------------------------------- --------------------------------------------------------------------------------------------- +hbr Hierarchical Bayesian Regression `Kia et al 2020 `_ +blr Bayesian Linear Regression `Huertas et al 2017 `_ +np Neural Processes `Kia et al 2018 `_ +rfa Random Feature Approximation `Rahimi and Recht 2007 `_ +================= ================================= ============================================================================================= + + +Note that keyword arguments can also be specified from the command line to offer additional flexibility. For example, the following command will fit a normative model to the same data, but without standardizing the data first and additionally writing out model coefficients (this is not done by default because they can use a lot of disk space). + +.. code-block:: bash + + python normative.py -c cov.txt -k 5 -a blr resp.txt standardize=False savemodel=True + + +A full set of keyword arguments is provided in the table below. At a minimum, a set of responses and covariates must be provided and either the corresponding number of cross-validation folds or a set of test covariates. + +**Table 2: Keywords and command line arguments** + +============ ========================= ========================================================================================== +**Keyword** **Command line shortcut** **Description** +------------ ------------------------- ------------------------------------------------------------------------------------------ +covfunc -c filename Covariate file +cvfolds -k num_folds Number of cross-validation folds +testcov -t filename Test covariates +testresp -r filename Test responses +maskfile -m filename mask to apply to the response variables (nifti/cifti only) +alg -a algorithm Estimation algorithm: 'gpr' (default), 'blr', 'np', 'hbr' or 'rfa'. See table above. +function -f function function to call (estimate, predict, transfer, extend). 
See below +standardize -s (skip) Standardize the covariates and response variables using the training data +configparam -x config Pass the value of config to the estimation algorithm (deprecated) +outputsuffix Suffix to apply to the output variables +saveoutput Write output (default = True) +savemodel Save the model coefficients and meta-data (default = False) +warp Warping function to apply to the responses (blr only) +============ ========================= ========================================================================================== + +Basic usage (scripted) +**************************** + +The same can be done by importing the estimate function from ``normative.py``. For example, the following code snippet will: (i) mask the nifti data specified in resp_train.nii.gz using the mask specified (which must have the same voxel size as the response variables) (ii) fit a linear normative model to each voxel, (iii) apply this to make predictions using the test covariates and (iv) compute deviation scores and error metrics by comparing against the true test response variables. + +.. code-block:: python + + from pcntoolkit.normative import estimate + + # estimate a normative model + estimate("cov_train.txt", "resp_train.nii.gz", maskfile="mask.nii.gz", \ + testresp="resp_test.nii.gz", testcov="cov_test.txt", alg="blr") + + +The estimate function does all these operations in a single step. In some cases it may be desirable to separate these steps. For example, if a normative model has been estimated on a large dataset, it may be desirable to save the model before applying it to a new dataset (e.g. from a a different site). For example, the following code snippet will first fit a model, then apply it to a set of dummy covariates so that the normative model can be plotted + +.. code-block:: python + + from pcntoolkit.normative import estimate, predict + + # fit a normative model, using training covariates and responses + # then apply to test dataset. Saved with file suffix '_estimate' + estimate(cov_file_tr, resp_file_tr, testresp=resp_file_te, \ + testcov=cov_file_te, alg='blr', optimizer = 'powell', \ + savemodel=True, standardize = False) + + # make predictions on a set of dummy covariates (with no responses) + # Saved with file suffix '_predict' + yhat, s2 = predict(cov_file_dummy) + +For further information, see the `developer documentation `_. The same can be achieved from the command line, using te ``-f`` argument, for example, by specifying ``-f predict``. + +Paralellising estimation to speed things up +********************************************** + +Normative model estimation is typically quite computationally expensive, especially for large datasets. This is exacerbated by high-resolution data (e.g. voxelwise data). For such cases normative model estimation can be paralellised across multiple compute nodes which can be achieved using the ``normative_parallel.py`` script. This involves splitting the response matrix into a set of batches, each of a specified size, i.e.: + +.. figure:: ./nm_parallel.png + :height: 300px + :align: center + +Each of these are then submitted to a cluster and reassembled once the cluster jobs have been completed. The following code snippet illustrates this procedure: + +.. 
code-block:: python + + from pcntoolkit.normative_parallel import execute_nm, collect_nm, delete_nm + + # General config parameters + normative_path = '//pcntoolkit/normative.py' + python_path='//bin/python' + working_dir = '//' + log_dir = '//' + + # cluster paramateters + job_name = 'nm_demo' # name for the cluster job + batch_size = 10 # number of models (e.g. voxels) per batch + memory = '4gb' # memory required + duration = '01:00:00' # walltime + cluster = 'torque' + + # fit the model. Specifying binary=True means results will be stored in .pkl format + execute_nm(working_dir, python_path, normative_path, job_name, cov_file.txt, \ + resp_file.pkl, batch_size, memory, duration, cluster_spec=cluster, \ + cv_folds=2, log_path=log_dir, binary=True) + + # wait until jobs complete ... + + # reassemble results + collect_nm(working_dir, job_name, collect=True, binary=True) + + # remove temporary files + delete_nm(working_dir, binary=True) + + +At the present time, only ASCII and pickle format are supported using normative parallel. Note also that it may be necessary to customise the script to support your local cluster architecture. This can be done using fairly obvious modifications to the ``execute_nm()`` function. \ No newline at end of file diff --git a/doc/build/html/_sources/pages/references.rst.txt b/doc/build/html/_sources/pages/references.rst.txt new file mode 100644 index 00000000..20905003 --- /dev/null +++ b/doc/build/html/_sources/pages/references.rst.txt @@ -0,0 +1,32 @@ +.. _referencelist: + +.. title:: List of references + +References +================== + +Marquand, A. F., Wolfers, T., Mennes, M., Buitelaar, J., & Beckmann, C. F. (2016). Beyond Lumping and Splitting: A Review of Computational Approaches for Stratifying Psychiatric Disorders. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 1(5), 433–447. https://doi.org/10.1016/j.bpsc.2016.04.002 + +Marquand, A. F., Rezek, I., Buitelaar, J., & Beckmann, C. F. (2016). Understanding Heterogeneity in Clinical Cohorts Using Normative Models: Beyond Case-Control Studies. Biological Psychiatry, 80(7), 552–561. https://doi.org/10.1016/j.biopsych.2015.12.023 + +Marquand, A. F., Kia, S. M., Zabihi, M., Wolfers, T., Buitelaar, J. K., & Beckmann, C. F. (2019). Conceptualizing mental disorders as deviations from normative functioning. Molecular Psychiatry, 24(10), 1415–1424. https://doi.org/10.1038/s41380-019-0441-1 + +Marquand, A. F., Haak, K. V., & Beckmann, C. F. (2017). Functional corticostriatal connection topographies predict goal directed behaviour in humans. Nature Human Behaviour, 1(8). https://doi.org/10.1038/s41562-017-0146 + +Wolfers, T., Beckmann, C. F., Hoogman, M., Buitelaar, J. K., Franke, B., & Marquand, A. F. (2020). Individual differences v. the average patient: Mapping the heterogeneity in ADHD using normative models. Psychological Medicine, 50(2), 314–323. https://doi.org/10.1017/S0033291719000084 + +Wolfers, T., Rokicki, J., Alnæs, D., Berthet, P., Agartz, I., Kia, S. M., Kaufmann, T., Zabihi, M., Moberget, T., Melle, I., Beckmann, C. F., Andreassen, O. A., Marquand, A. F., & Westlye, L. T. (n.d.). Replicating extensive brain structural heterogeneity in individuals with schizophrenia and bipolar disorder. Human Brain Mapping, n/a(n/a). https://doi.org/10.1002/hbm.25386 + +Zabihi, M., Floris, D. L., Kia, S. M., Wolfers, T., Tillmann, J., Arenas, A. L., Moessnang, C., Banaschewski, T., Holt, R., Baron-Cohen, S., Loth, E., Charman, T., Bourgeron, T., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. 
F., & Marquand, A. (2020). Fractionating autism based on neuroanatomical normative modeling. Translational Psychiatry, 10(1), 1–10. https://doi.org/10.1038/s41398-020-01057-0 + +Zabihi, M., Oldehinkel, M., Wolfers, T., Frouin, V., Goyard, D., Loth, E., Charman, T., Tillmann, J., Banaschewski, T., Dumas, G., Holt, R., Baron-Cohen, S., Durston, S., Bölte, S., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. F. (2019). Dissecting the Heterogeneous Cortical Anatomy of Autism Spectrum Disorder Using Normative Models. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 4(6), 567–578. https://doi.org/10.1016/j.bpsc.2018.11.013 + +Kia, S. M., & Marquand, A. (2018). Normative Modeling of Neuroimaging Data using Scalable Multi-Task Gaussian Processes. ArXiv:1806.01047 [Cs, Stat]. http://arxiv.org/abs/1806.01047 + +Kia, S. M., Beckmann, C. F., & Marquand, A. F. (2018). Scalable Multi-Task Gaussian Process Tensor Regression for Normative Modeling of Structured Variation in Neuroimaging Data. ArXiv:1808.00036 [Cs, Stat]. http://arxiv.org/abs/1808.00036 + +Kia, S. M., Huijsdens, H., Dinga, R., Wolfers, T., Mennes, M., Andreassen, O. A., Westlye, L. T., Beckmann, C. F., & Marquand, A. F. (2020). Hierarchical Bayesian Regression for Multi-site Normative Modeling of Neuroimaging Data. In A. L. Martel, P. Abolmaesumi, D. Stoyanov, D. Mateus, M. A. Zuluaga, S. K. Zhou, D. Racoceanu, & L. Joskowicz (Eds.), Medical Image Computing and Computer Assisted Intervention – MICCAI 2020 (pp. 699–709). Springer International Publishing. https://doi.org/10.1007/978-3-030-59728-3_68 + +Huertas, I., Oldehinkel, M., van Oort, E. S. B., Garcia-Solis, D., Mir, P., Beckmann, C. F., & Marquand, A. F. (2017). A Bayesian spatial model for neuroimaging data based on biologically informed basis functions. NeuroImage, 161, 134–148. https://doi.org/10.1016/j.neuroimage.2017.08.009 + +Fraza, C. J., Dinga, R., Beckmann, C. F., & Marquand, A. F. (2021). Warped Bayesian Linear Regression for Normative Modelling of Big Data. BioRxiv, 2021.04.05.438429. https://doi.org/10.1101/2021.04.05.438429 diff --git a/doc/build/html/_sources/pages/scripts.rst.txt b/doc/build/html/_sources/pages/scripts.rst.txt new file mode 100644 index 00000000..4ed17f27 --- /dev/null +++ b/doc/build/html/_sources/pages/scripts.rst.txt @@ -0,0 +1,211 @@ +Intro to normative modelling +=============================== + +Normative modelling essentially aims to predict centiles of variance in a response variable (e.g. a region of interest or other neuroimaging-derived measure) on the basis of a set of covariates (e.g. age, clinical scores, diagnosis) A conceptual overview of the approach can be found in [this publication](https://www.nature.com/articles/s41380-019-0441-1). For example, the image below shows an example of a normative model that aims to predict vertex-wise cortical thickness data, essentially fitting a separate model for each vertex. + +.. figure:: ./nm_concept.png + :height: 300px + :align: center + +In practice, this is done by regressing the biological response variables against a set of clinical or demographic covariates. In the instructions that follow, it is helpful to think of these as being stored in matrices as shown below: + +.. figure:: ./nm_overview.png + :height: 300px + :align: center + +There are many options for this, but techniques that provide a distributional form for the centiles are appealing, since they help to estimate extreme centiles more efficiently. 
Bayesian methods are also beneficial in this regard because they also allow separation of modelling uncertainty from variation in the data. Many applications of normative modelling use Gaussian Process Regression, which is the default method in this toolkit. Typically (but not [always](https://link.springer.com/chapter/10.1007/978-3-030-00931-1_15)), each response variable is estimated independently. + +Data formats +**************************** + +Generally the covariates are specified in text format, roughly following the FSL convention in that the text file should contain one entry +(i.e. subject) per line, with columns space or tab separated and no headers. For example: + +.. code-block:: bash + + head cov.txt + 52 55 94 4.6 + 49 43 59 4.6 + 56 80 63 5.6 + 39 48 42 4.3 + + +For the response variables, the following data formats are supported: + +* NIfTI (e.g. .nii.gz or .img/.hdr) +* CIFTI (e.g. .dtseries.nii) +* Pickle/pandas (e.g. .pkl) +* ASCII text (e.g. .txt, .csv, .tsv) + +For nifti/cifti formats, data should be in timeseries format with subjects along the time dimension and these images will be masked and reshaped into vectors. If no mask is specified, one will be created automatically from the image data. + +Basic usage (command line) +**************************** + +The simplest method to estimate a normative model is using the ```normative.py``` script which can be run from the command line or imported as a python module. For example, the following command will estimate a normative model on the basis of the matrix of covariates and responses specified in cov.txt and resp.txt respectively. These are simply tab or space separated ASCII text files that contain the variables of interest, with one subject per row. + +.. code-block:: bash + + python normative.py -c cov.txt -k 5 -a blr resp.txt + + +The argument ``-a blr`` tells the script to use Bayesian Linear regression rather than the default Gaussian process regression model and ``-k 5`` tells the script to run internal 5-fold cross-validation across all subjects in the covariates and responses files. Alternatively, the model can be evaluated on a separate dataset by specifying test covariates (and optionally also test responses). +The following estimation algorithms are supported + + +**Table 1:** Estimation algorithms +.. list-table:: + :widths: 50 50 50 + :header-rows: 1 + + * -key value + -Description + -Reference + * -gpr (default) + -Gaussian Process Regression + -Marquand et al 2016 https://www.sciencedirect.com/science/article/pii/S0006322316000020 + * -hbr + -Hierarchical Bayesian Regression + -Kia et al 2020 https://arxiv.org/abs/2005.12055 + * -blr + -Bayesian Linear Regression + -Huertas et al 2017 https://www.sciencedirect.com/science/article/pii/S1053811917306560 + * -np + -Neural Processes + -Kia et al 2018 https://arxiv.org/abs/1812.04998 + * -rfa + -Random Feature Approximation + -Rahimi and Recht 2007 https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf + +Note that keyword arguments can also be specified from the command line to offer additional flexibility. For example, the following command will fit a normative model to the same data, but without standardizing the data first and additionally writing out model coefficients (this is not done by default because they can use a lot of disk space). + +.. code-block:: bash + + python normative.py -c cov.txt -k 5 -a blr resp.txt standardize=False savemodel=True + + +A full set of keyword arguments is provided in the table below. 
At a minimum, a set of responses and covariates must be provided, along with either the corresponding number of cross-validation folds or a set of test covariates. + +**Table 2:** Keywords and command line arguments + +.. list-table:: + :widths: 50 50 50 + :header-rows: 1 + + * - keyword + - Command line shortcut + - Description + * - covfile + - -c filename + - Covariate file + * - cvfolds + - -k num_folds + - Number of cross-validation folds + * - testcov + - -t filename + - Test covariates + * - testresp + - -r filename + - Test responses + * - maskfile + - -m filename + - Mask to apply to the response variables (nifti/cifti only) + * - alg + - -a algorithm + - Estimation algorithm: 'gpr' (default), 'blr', 'np', 'hbr' or 'rfa'. See table above. + * - function + - -f function + - Function to call (estimate, predict, transfer, extend). See below. + * - standardize + - -s (skip) + - Standardize the covariates and response variables using the training data + * - configparam + - -x config + - Pass the value of config to the estimation algorithm (deprecated) + * - outputsuffix + - + - Suffix to apply to the output variables + * - saveoutput + - + - Write output (default = True) + * - savemodel + - + - Save the model coefficients and meta-data (default = False) + * - warp + - + - Warping function to apply to the responses (blr only) + +Basic usage (scripted) +**************************** + +The same can be done by importing the estimate function from normative.py. For example, the following code snippet will: (i) mask the nifti data specified in resp_train.nii.gz using the mask specified (which must have the same voxel size as the response variables), (ii) fit a linear normative model to each voxel, (iii) apply this to make predictions using the test covariates and (iv) compute deviation scores and error metrics by comparing against the true test response variables. + +.. code-block:: python + + from pcntoolkit.normative import estimate + + # estimate a normative model + estimate("cov_train.txt", "resp_train.nii.gz", maskfile="mask.nii.gz", \ + testresp="resp_test.nii.gz", testcov="cov_test.txt", alg="blr") + + +The estimate function does all these operations in a single step. In some cases it may be desirable to separate these steps. For example, if a normative model has been estimated on a large dataset, it may be useful to save the model before applying it to a new dataset (e.g. from a different site). The following code snippet will first fit a model, then apply it to a set of dummy covariates so that the normative model can be plotted: + +.. code-block:: python + + from pcntoolkit.normative import estimate, predict + + # fit a normative model, using training covariates and responses + # then apply to test dataset. Saved with file suffix '_estimate' + estimate(cov_file_tr, resp_file_tr, testresp=resp_file_te, \ + testcov=cov_file_te, alg='blr', optimizer = 'powell', \ + savemodel=True, standardize = False) + + # make predictions on a set of dummy covariates (with no responses) + # Saved with file suffix '_predict' + yhat, s2 = predict(cov_file_dummy) + +For further information, see the `developer documentation <https://amarquand.github.io/PCNtoolkit/doc/build/html/modindex.html#module-normative>`__. The same can be achieved from the command line, using the ``-f`` argument, for example by specifying ``-f predict``. + +Parallelising estimation to speed things up +********************************************** + +Normative model estimation is typically quite computationally expensive, especially for large datasets.
This is exacerbated by high-resolution data (e.g. voxelwise data). For such cases, normative model estimation can be parallelised across multiple compute nodes using the ``normative_parallel.py`` script. This involves splitting the response matrix into a set of batches, each of a specified size, i.e.: + +.. figure:: ./nm_parallel.png + :height: 300px + :align: center + +Each of these is then submitted to a cluster and the results are reassembled once the cluster jobs have completed. The following code snippet illustrates this procedure: + +.. code-block:: python + + from pcntoolkit.normative_parallel import execute_nm, collect_nm, delete_nm + + # General config parameters + normative_path = '//pcntoolkit/normative.py' + python_path='//bin/python' + working_dir = '//' + log_dir = '//' + + # cluster parameters + job_name = 'nm_demo' # name for the cluster job + batch_size = 10 # number of models (e.g. voxels) per batch + memory = '4gb' # memory required + duration = '01:00:00' # walltime + cluster = 'torque' + + # fit the model. Specifying binary=True means results will be stored in .pkl format + execute_nm(working_dir, python_path, normative_path, job_name, 'cov_file.txt', \ + 'resp_file.pkl', batch_size, memory, duration, cluster_spec=cluster, \ + cv_folds=2, log_path=log_dir, binary=True) + + # wait until jobs complete ... + + # reassemble results + collect_nm(working_dir, job_name, collect=True, binary=True) + + # remove temporary files + delete_nm(working_dir, binary=True) + + +At present, only ASCII and pickle formats are supported by ``normative_parallel``. Note also that it may be necessary to customise the script to support your local cluster architecture. This can be done with fairly obvious modifications to the ``execute_nm()`` function. \ No newline at end of file diff --git a/doc/build/html/_sources/pages/tutorial_CPC2020.rst.txt b/doc/build/html/_sources/pages/tutorial_CPC2020.rst.txt new file mode 100644 index 00000000..7ac969d8 --- /dev/null +++ b/doc/build/html/_sources/pages/tutorial_CPC2020.rst.txt @@ -0,0 +1,427 @@ +Gaussian Process Regression +============================== + +Created by `Saige Rutherford `_, `Thomas Wolfers `_, `Mariam Zabihi `_ + +View on `GitHub `_ + +Run in `Google Colab `_ + +\______________________________________________________________________________\_ + +Background Story +**************************** + +Morten and Ingrid are concerned about the health of their father, +Nordan. He recently turned 65 years old. A few months ago he could not find +his way home. Together, they visit a neurologist/psychiatrist to conduct +a number of cognitive tests. However, those tests were inconclusive. +While Nordan has a relatively low IQ, it could not explain his trouble +returning home. + +Recently, the family heard about a new screening technique called +normative modeling, with which one can place individuals in reference to +a population norm on measures such as, for instance, brain volume. Nordan +would like to undertake this procedure to better know what is going on +and to potentially find targets for treatment. Therefore, the family +booked an appointment with you, the normative modeling specialist. To +find out what is going on you compare Nordan’s hippocampus to the norm +and to a group of persons with dementia disorders who have a similar +IQ and age, as well as the same sex, as Nordan. + +Do your best to get as far as you can. However, you do not need to feel +bad if you cannot complete everything during the tutorial.
+ +Task 0: Load data and install the pcntoolkit +***************************************************** + +.. code:: ipython3 + + #install normative modeling + !pip install pcntoolkit + +**Option 1:** Connect your Google Drive account, and load data from +Google Drive. Having Google Drive connected will allow you to save any +files created back to your Drive folder. This step will require you to +download the csv files from +`Github `__ +to your computer, and then make a folder in your Google Drive account +and upload the csv files to this folder. + +.. code:: ipython3 + + from google.colab import drive + drive.mount('/content/drive') + + #change dir to data on your google drive + import os + os.chdir('drive/My Drive/name-of-folder-where-you-uploaded-csv-files-from-Github/') #Change this path to match the path to your data in Google Drive + + # code by T. Wolfers + +**Option 2:** Import the files directly from Github, and skip adding +them to Google Drive. + +.. code:: ipython3 + + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_demographics.csv + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_demographics_nordan.csv + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_features.csv + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_features_nordan.csv + + # code by S. Rutherford + +TASK 1: Format input data +********************************* + +You have four files: the features and demographics files for the +norm sample, and two files of the same name for Nordan, your test sample. +Because one of your coworkers has already done the preprocessing and quality control, +there are more subjects in the demographics file than in the features +file of the norm sample. Please select the overlap of participants +between those two files. + +*Question for your understanding:* + +1) Why do we have to select the overlap between participants in terms of + features and demographics? + +.. code:: ipython3 + + import pandas as pd + + # read in the files. + norm_demographics = pd.read_csv('cpc_camcan_demographics.csv', + sep= ",", + index_col = 0) + norm_features = pd.read_csv('cpc_camcan_features.csv', + sep=",", + index_col = 0) + + # check columns through print [there are other better options] + print(norm_demographics) + print(norm_features) + + # find overlap in terms of participants between norm_sample_features and + # norm_sample_demographics + + norm_demographics_features = pd.concat([norm_demographics, norm_features], + axis = 1, + join = 'inner') # inner checks overlap + # outer combines + print(norm_demographics_features) + + # code by T. Wolfers + +TASK 2: Prepare the covariate_normsample and testresponse_normsample files. +********************************************************************************** + +As mentioned in the introductory presentation, those files need a +specific format and the entries need to be separated by spaces. Use +whatever method you know to prepare those files based on the data +provided in TASK 1. Save those files in .txt format in your drive. Also +get rid of the column names and participant IDs. + +Given that we only have limited time in this practical, we have to make a +selection of the features based on your prior knowledge. With the +information in mind that Nordan does not remember his way home, which +subfield of the hippocampus is probably a good target for the +investigations?
Select a maximum of four hippocampal regions as +features. + +NOTE: Normative modeling is a screening tool; we only make this selection +due to time constraints. In reality, we build these models on millions of +putative biomarkers that are not restricted to brain imaging. + +*Questions for your understanding:* + +2) What is the requirement for the features in terms of variable + properties (e.g. dichotomous or continuous)? 3) What is the requirement + for the covariates in terms of these properties? 4) What are the + requirements for both together? 5) How does this depend on the + algorithm used? + +.. code:: ipython3 + + # prepare covariate_normsample for sex and age + covariate_normsample = norm_demographics_features[['sex', + 'age']] + + covariate_normsample.to_csv('covariate_normsample.txt', + sep = ' ', + header = False, + index = False) + + # prepare features_normsample for relevant hippocampal subfields + features_normsample = norm_demographics_features[['left_CA1', + 'left_CA3', + 'right_CA1', + 'right_CA3']] + + features_normsample.to_csv('features_normsample.txt', + sep = ' ', + header = False, + index = False) + + # code by T. Wolfers + +TASK 3: Estimate normative model +*************************************** + +Once you have prepared and saved all the necessary files, look at the +pcntoolkit functions for running normative modeling. Select an appropriate method, +set up the toolkit and run your analyses using 2-fold cross-validation +in the norm sample. Change the output suffix from estimate to ’_2fold’. + +HINT: You primarily need the estimate function. + +SUGGESTION: While this process is running you can go on to TASK 4; +you will have no doubt when it is running correctly. + +*Questions for your understanding:* + +6) What does cvfolds mean and why do we use it? 7) What is the output of + the estimate function and what does it mean? + +.. code:: ipython3 + + import pcntoolkit as pcn + + # run normative modeling using 2-fold cross-validation + + pcn.normative.estimate(covfile = 'covariate_normsample.txt', + respfile = 'features_normsample.txt', + cvfolds = 2, + alg = 'gpr', + outputsuffix = '_2fold') + + # code by T. Wolfers + +TASK 4: Estimate the forward model of the normative model +***************************************************************** + +In order to visualize the normative trajectories you first need to run +the forward model. To this end you need to set up an appropriate +covariate_forwardmodel file that covers the age range appropriately for +both sexes. Save this file as .txt. Then you can input the files you +made in TASK 2 as well as the file you made now and run the forward +model using the appropriate specifications. + +*Questions for your understanding:* + +8) What is yhat and ys2? 9) Why does the output of the forward model + not include the Z-scores? + +.. code:: ipython3 + + # create covariate_forwardmodel.txt file + covariate_forwardmodel = {'sex': [0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1], + 'age': [20, 30, 40, 50, 60, 70, 80, + 20, 30, 40, 50, 60, 70, 80]} + covariate_forwardmodel = pd.DataFrame(data=covariate_forwardmodel) + + covariate_forwardmodel.to_csv('covariate_forwardmodel.txt', + sep = ' ', + header = False, + index = False) + + # estimate forward model + pcn.normative.estimate(covfile = 'covariate_normsample.txt', + respfile = 'features_normsample.txt', + testcov = 'covariate_forwardmodel.txt', + cvfolds = None, + alg = 'gpr', + outputsuffix = '_forward') + + # code by T.
Wolfers + +TASK 5: Visualize forward model +*************************************** + +Visualize the forward model of the normative model similar to the figure +below. + +.. figure:: nm_plot.jpeg + +HINT: First create a function that calculates the confidence intervals +and then plot yhat, y2 of the forward model. Finally, plot the data of +individual participants. + +.. code:: ipython3 + + import numpy as np + import matplotlib.pyplot as plt + + # confidence interval calculation at x_forward + def confidence_interval(s2,x,z): + CI=np.zeros((len(x_forward),4)) + for i,xdot in enumerate(x_forward): + ci_inx=np.isin(x,xdot) + S2=s2[ci_inx] + S_hat=np.mean(S2,axis=0) + n=S2.shape[0] + CI[i,:]=z*np.power(S_hat/n,.5) + return CI + + + feature_names=['left_CA1','left_CA3','right_CA1','right_CA3'] + sex_covariates=[ 'Female','Male'] + # Creating plots for Female and male + for i,sex in enumerate(sex_covariates): + #forward model data + forward_yhat = pd.read_csv('yhat_forward.txt', sep = ' ', header=None) + yhat_forward=forward_yhat.values + yhat_forward=yhat_forward[7*i:7*(i+1)] + x_forward=[20, 30, 40, 50, 60, 70, 80] + + # Find the index of the data exclusively for one sex. Female:0, Male: 1 + inx=np.where(covariate_normsample.sex==i)[0] + x=covariate_normsample.values[inx,1] + # actual data + y = pd.read_csv('features_normsample.txt', sep = ' ', header=None) + y=y.values[inx] + # confidence Interval yhat+ z *(std/n^.5)-->.95 % CI:z=1.96, 99% CI:z=2.58 + s2= pd.read_csv('ys2_2fold.txt', sep = ' ', header=None) + s2=s2.values[inx] + + CI_95=confidence_interval(s2,x,1.96) + CI_99=confidence_interval(s2,x,2.58) + + # Creat a trejactroy for each point + for j,name in enumerate(feature_names): + fig=plt.figure() + ax=fig.add_subplot(111) + ax.plot(x_forward,yhat_forward[:,j], linewidth=4, label='Normative trejactory') + + + ax.plot(x_forward,CI_95[:,j]+yhat_forward[:,j], linewidth=2,linestyle='--',c='g', label='95% confidence interval') + ax.plot(x_forward,-CI_95[:,j]+yhat_forward[:,j], linewidth=2,linestyle='--',c='g') + + ax.plot(x_forward,CI_99[:,j]+yhat_forward[:,j], linewidth=1,linestyle='--',c='k', label='99% confidence interval') + ax.plot(x_forward,-CI_99[:,j]+yhat_forward[:,j], linewidth=1,linestyle='--',c='k') + + ax.scatter(x,y[:,j],c='r', label=name) + plt.legend(loc='upper left') + plt.title('Normative trejectory of' +name+' in '+sex+' cohort') + plt.show() + plt.close() + + # code by M. Zabihi + +TASK 6: Apply the normative model to Nordan’s data and the dementia patients. +************************************************************************************ + +.. 
code:: ipython3 + + # read in Nordan's as well as the patient's demographics and features + demographics_nordan = pd.read_csv('cpc_camcan_demographics_nordan.csv', + sep= ",", + index_col = 0) + features_nordan = pd.read_csv('cpc_camcan_features_nordan.csv', + sep=",", + index_col = 0) + + # create a covariate file for Nordan's as well as the patient's demograhpics + covariate_nordan = demographics_nordan[['sex', + 'age']] + covariate_nordan.to_csv('covariate_nordan.txt', + sep = ' ', + header = False, + index = False) + + # create the corresponding feature file + features_nordan = features_nordan[['left_CA1', + 'left_CA3', + 'right_CA1', + 'right_CA3']] + + features_nordan.to_csv('features_nordan.txt', + sep = ' ', + header = False, + index = False) + + # apply normative modeling + pcn.normative.estimate(covfile = 'covariate_normsample.txt', + respfile = 'features_normsample.txt', + testcov = 'covariate_nordan.txt', + testresp = 'features_nordan.txt', + cvfolds = None, + alg = 'gpr', + outputsuffix = '_nordan') + + # code by T. Wolfers + +TASK 7: In which hyppocampal subfield(s) does Nordan deviate extremely? +******************************************************************************* + +No coding necessary just create a presentation which includes +recommendations to Nordan and his family. Use i) \|Z\| > 3.6 ii) \|Z\| > +1.96 as definitions for extreme normative deviations. + +TASK 8 (OPTIONAL): Implement a function that calculates percentage change. +********************************************************************************** + +Percentage change = :math:`\frac{x1 - x2}{|x2|}*100` + +.. code:: ipython3 + + # function that calculates percentage change + def calculate_percentage_change(x1, x2): + percentage_change = ((x1 - x2) / abs(x2)) * 100 + return percentage_change + + # code by T. Wolfers + +TASK 9 (OPTIONAL): Visualize percent change +**************************************************** + +Plot the prercentage change in Yhat of the forward model in reference to +age 20. Do that for both sexes seperately. + +.. code:: ipython3 + + import matplotlib.pyplot as plt + + forward_yhat = pd.read_csv('yhat_forward.txt', sep = ' ', header=None) + + # You can indicate here which hypocampal subfield you like to visualize + hyppocampal_subfield = 0 + + percentage_change_female = [] + percentage_change_male = [] + count = 0 + lengths = len(forward_yhat[hyppocampal_subfield]) + for entry in forward_yhat[hyppocampal_subfield]: + if count > 0 and count < 7: + loop_percentage_change_female = calculate_percentage_change(entry, + forward_yhat.iloc[0, + hyppocampal_subfield]) + percentage_change_female.append(loop_percentage_change_female) + elif count > 7: + loop_percentage_change_male = calculate_percentage_change(entry, + forward_yhat.iloc[9, + hyppocampal_subfield]) + percentage_change_male.append(loop_percentage_change_male) + count = count + 1 + + names = ['30 compared to 20 years', + '40 compared to 20 years', + '50 compared to 20 years', + '60 compared to 20 years', + '70 compared to 20 years', + '80 compared to 20 years'] + + # females + plt.subplot(121) + plt.bar(names, percentage_change_female) + plt.xticks(rotation=90) + plt.ylim(-20, 2) + + # males + plt.subplot(122) + plt.bar(names, percentage_change_male) + plt.xticks(rotation=90) + plt.ylim(-20, 2) + + # code by T. 
Wolfers diff --git a/doc/build/html/_sources/pages/tutorial_HBR.rst.txt b/doc/build/html/_sources/pages/tutorial_HBR.rst.txt new file mode 100644 index 00000000..dd67ee33 --- /dev/null +++ b/doc/build/html/_sources/pages/tutorial_HBR.rst.txt @@ -0,0 +1,339 @@ +Hierarchical Bayesian Regression +====================================================================================== + +Hierarchical Bayesian Regression normative modelling and transfer onto unseen sites. + +This notebook will go through basic data preparation (training and +testing set, `see Saige’s +tutorial `__ +on Normative Modelling for more detail), the actual training of the +models, and will finally describe how to transfer the trained models +onto unseen sites. The approach is described in detail in these papers: + +- `Kia et al 2020 `_. +- `Kia et al 2021 `_. + +View on `GitHub `_ + +While we run everything on a single compute node here, for larger datasets it is probably desirable to parallelise this using the normative_parallel functionality. + +Run in `Google Colab `_ + + +Created by `Saige Rutherford `__, adapted/edited by Andre Marquand and Pierre Berthet + + +.. container:: + +Step 0: Install necessary libraries & grab data files +******************************************************* + + +.. code:: ipython3 + + ! pip install numpy scipy arviz pymc3 matplotlib pandas + ! pip uninstall -y Theano-PyMC # conflicts with Theano on some environments + ! pip install pcntoolkit==0.19 + +For this tutorial we will use data from the `Functional Connectome +Project FCON1000 `__ to create a +multi-site dataset. + +The dataset contains some cortical measures (e.g. thickness), processed by +Freesurfer 6.0, and some covariates (e.g. age, site, gender). + +First we import the required packages and create a working directory. + +.. code:: ipython3 + + import os + import pandas as pd + import pcntoolkit as ptk + import numpy as np + import pickle + from matplotlib import pyplot as plt + +.. code:: ipython3 + + processing_dir = "HBR_demo/" # replace with a path to your working directory + if not os.path.isdir(processing_dir): + os.makedirs(processing_dir) + os.chdir(processing_dir) + processing_dir = os.getcwd() + +Overview +^^^^^^^^ + +Here we get the FCON dataset, remove the ICBM site for later transfer, +assign a site id to each of the different scanner sites and print an overview +of the left hemisphere mean raw cortical thickness as a function of age, +color coded by the various sites: + +.. code:: ipython3 + + fcon = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000.csv') + + icbm = fcon.loc[fcon['site'] == 'ICBM'] + icbm['sitenum'] = 0 + fcon = fcon.loc[fcon['site'] != 'ICBM'] + + sites = fcon['site'].unique() + fcon['sitenum'] = 0 + + f, ax = plt.subplots(figsize=(12, 12)) + + for i,s in enumerate(sites): + idx = fcon['site'] == s + fcon['sitenum'].loc[idx] = i + + print('site',s, sum(idx)) + ax.scatter(fcon['age'].loc[idx], fcon['lh_MeanThickness_thickness'].loc[idx]) + + ax.legend(sites) + ax.set_ylabel('LH mean cortical thickness [mm]') + ax.set_xlabel('age') + + +Step 1: Prepare training and testing sets +****************************************** + +We then randomly split the samples (participants) in half, to be either +in the training or in the testing set. We do this for the remaining +FCON dataset and for the ICBM data. The transfer function will also +require a training and a test sample.
+ +The numbers of samples per sites used for training and for testing are +then displayed. + +.. code:: ipython3 + + tr = np.random.uniform(size=fcon.shape[0]) > 0.5 + te = ~tr + + fcon_tr = fcon.loc[tr] + fcon_te = fcon.loc[te] + + tr = np.random.uniform(size=icbm.shape[0]) > 0.5 + te = ~tr + + icbm_tr = icbm.loc[tr] + icbm_te = icbm.loc[te] + + print('sample size check') + for i,s in enumerate(sites): + idx = fcon_tr['site'] == s + idxte = fcon_te['site'] == s + print(i,s, sum(idx), sum(idxte)) + + # Uncomment the following lines if you want to keep a defined version of the sets + # fcon_tr.to_csv('/Users/andmar/data/sairut/data/fcon1000_tr.csv') + # fcon_te.to_csv('/Users/andmar/data/sairut/data/fcon1000_te.csv') + # icbm_tr.to_csv('/Users/andmar/data/sairut/data/fcon1000_icbm_tr.csv') + # icbm_te.to_csv('/Users/andmar/data/sairut/data/fcon1000_icbm_te.csv') + +Otherwise you can just load these pre defined subsets: + +.. code:: ipython3 + + # Optional + fcon_tr = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_tr.csv') + fcon_te = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_te.csv') + icbm_tr = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_icbm_tr.csv') + icbm_te = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_icbm_te.csv') + +Step 2: Configure HBR inputs: covariates, measures and batch effects +********************************************************************* + +We will here only use the mean cortical thickness for the Right and Left +hemisphere: two idps. + +.. code:: ipython3 + + idps = ['rh_MeanThickness_thickness','lh_MeanThickness_thickness'] + +As input to the model, we need covariates (used to describe predictable +source of variability (fixed effects), here ‘age’), measures (here +cortical thickness on two idps), and batch effects (random source of +variability, here ‘scanner site’ and ‘sex’). + +``X`` corresponds to the covariate(s) + +``Y`` to the measure(s) + +``batch_effects`` to the random effects + +We need these values both for the training (``_train``) and for the +testing set (``_test``). + +.. code:: ipython3 + + X_train = (fcon_tr['age']/100).to_numpy(dtype=float) + Y_train = fcon_tr[idps].to_numpy(dtype=float) + batch_effects_train = fcon_tr[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_train.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_train), file) + with open('Y_train.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_train), file) + with open('trbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_train), file) + + + X_test = (fcon_te['age']/100).to_numpy(dtype=float) + Y_test = fcon_te[idps].to_numpy(dtype=float) + batch_effects_test = fcon_te[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_test.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_test), file) + with open('Y_test.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_test), file) + with open('tsbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_test), file) + + # a simple function to quickly load pickle files + def ldpkl(filename: str): + with open(filename, 'rb') as f: + return pickle.load(f) + +Step 3: Files and Folders grooming +*************************************** + +.. 
code:: ipython3 + + respfile = os.path.join(processing_dir, 'Y_train.pkl') # measurements (e.g. cortical thickness) of the training samples (columns: the various features/ROIs, rows: observations or subjects) + covfile = os.path.join(processing_dir, 'X_train.pkl') # covariates (e.g. age) of the training samples (columns: covariates, rows: observations or subjects) + + testrespfile_path = os.path.join(processing_dir, 'Y_test.pkl') # measurements for the testing samples + testcovfile_path = os.path.join(processing_dir, 'X_test.pkl') # covariate file for the testing samples + + trbefile = os.path.join(processing_dir, 'trbefile.pkl') # training batch effects file (e.g. scanner_id, gender) (columns: the various batch effects, rows: observations or subjects) + tsbefile = os.path.join(processing_dir, 'tsbefile.pkl') # testing batch effects file + + output_path = os.path.join(processing_dir, 'Models/') # output path, where the models will be written + log_dir = os.path.join(processing_dir, 'log/') # path where the log files will be written + if not os.path.isdir(output_path): + os.mkdir(output_path) + if not os.path.isdir(log_dir): + os.mkdir(log_dir) + + outputsuffix = '_estimate' # a string to name the output files, of use only to you, so adapt it for your needs. + +Step 4: Estimating the models +****************************** + +Now we have everything ready to estimate the normative models. The +``estimate`` function only needs the training and testing sets, each +divided into three datasets: covariates, measures and batch effects. We +specify ``alg='hbr'`` to use the Hierarchical Bayesian Regression +method, which is well suited for multi-site datasets. The +remaining arguments are basic data management: where the models, logs, +and output files will be written and how they will be named. + +.. code:: ipython3 + + ptk.normative.estimate(covfile=covfile, + respfile=respfile, + tsbefile=tsbefile, + trbefile=trbefile, + alg='hbr', + log_path=log_dir, + binary=True, + output_path=output_path, testcov= testcovfile_path, + testresp = testrespfile_path, + outputsuffix=outputsuffix, savemodel=True) + +At this point some analyses can be done, and there are also some error metrics that +may be of interest. This is covered in step 6 and in `Saige’s +tutorial `__ +on Normative Modelling. + +Step 5: Transferring the models to unseen sites +************************************************* + +Similarly to what was done before for the FCON data, we also need to +prepare the ICBM-specific data in order to run the transfer function: +training and testing sets of covariates, measures and batch effects: + +..
code:: ipython3 + + X_adapt = (icbm_tr['age']/100).to_numpy(dtype=float) + Y_adapt = icbm_tr[idps].to_numpy(dtype=float) + batch_effects_adapt = icbm_tr[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_adaptation.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_adapt), file) + with open('Y_adaptation.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_adapt), file) + with open('adbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_adapt), file) + + # Test data (new dataset) + X_test_txfr = (icbm_te['age']/100).to_numpy(dtype=float) + Y_test_txfr = icbm_te[idps].to_numpy(dtype=float) + batch_effects_test_txfr = icbm_te[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_test_txfr.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_test_txfr), file) + with open('Y_test_txfr.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_test_txfr), file) + with open('txbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_test_txfr), file) + + +.. code:: ipython3 + + respfile = os.path.join(processing_dir, 'Y_adaptation.pkl') + covfile = os.path.join(processing_dir, 'X_adaptation.pkl') + testrespfile_path = os.path.join(processing_dir, 'Y_test_txfr.pkl') + testcovfile_path = os.path.join(processing_dir, 'X_test_txfr.pkl') + trbefile = os.path.join(processing_dir, 'adbefile.pkl') + tsbefile = os.path.join(processing_dir, 'txbefile.pkl') + + log_dir = os.path.join(processing_dir, 'log_transfer/') + output_path = os.path.join(processing_dir, 'Transfer/') + model_path = os.path.join(processing_dir, 'Models/') # path to the previously trained models + outputsuffix = '_transfer' # suffix added to the output files from the transfer function + +Here, the difference is that the transfer function needs a model path, +which points to the models we just trained, and new site data (training +and testing). That is basically the only difference. + +.. code:: ipython3 + + yhat, s2, z_scores = ptk.normative.transfer(covfile=covfile, + respfile=respfile, + tsbefile=tsbefile, + trbefile=trbefile, + model_path = model_path, + alg='hbr', + log_path=log_dir, + binary=True, + output_path=output_path, + testcov= testcovfile_path, + testresp = testrespfile_path, + outputsuffix=outputsuffix, + savemodel=True) + + +And that is it, you now have models that benefited from prior knowledge +about different scanner sites to learn on unseen sites. 
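+As a quick check on the transferred models, you can inspect the deviation (Z) scores
+returned by the ``transfer`` call above. The snippet below is a minimal sketch (it
+assumes the ``z_scores`` array and the ``idps`` list from the previous steps are still
+in memory); the \|Z\| > 1.96 threshold is only an illustrative cut-off, not a toolkit
+default.
+
+.. code:: ipython3
+
+    import numpy as np
+
+    # z_scores has one row per ICBM test subject and one column per measure (idp)
+    z = np.asarray(z_scores)
+
+    for j, idp in enumerate(idps):
+        n_extreme = int(np.sum(np.abs(z[:, j]) > 1.96))
+        print(idp, ':', n_extreme, 'of', z.shape[0], 'test subjects with |Z| > 1.96')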
+ +Step 6: Interpreting model performance +***************************************** + +Output evaluation metrics definitions + +================= ====================================================================================================== +**key value** **Description** +----------------- ------------------------------------------------------------------------------------------------------ +yhat predictive mean +ys2 predictive variance +nm normative model +Z deviance scores +Rho Pearson correlation between true and predicted responses +pRho parametric p-value for this correlation +RMSE root mean squared error between true/predicted responses +SMSE standardised mean squared error +EV explained variance +MSLL mean standardized log loss `See page 23 `_ +================= ====================================================================================================== diff --git a/doc/build/html/_sources/pages/tutorial_ROIcorticalthickness.rst.txt b/doc/build/html/_sources/pages/tutorial_ROIcorticalthickness.rst.txt new file mode 100644 index 00000000..38ac31e8 --- /dev/null +++ b/doc/build/html/_sources/pages/tutorial_ROIcorticalthickness.rst.txt @@ -0,0 +1,724 @@ +Bayesian Linear Regression +============================ + +Normative Modeling Tutorial Using Multi-Site Cortical Thickness Data and Bayesian Linear Regression. + +This notebook will prepare the data for normative modelling (assembling +data matrices from different datasets, preparing the covariates etc). + +View on `GitHub `_ + +Run in `Google Colab `_ + +Created by `Saige Rutherford `__ + +.. raw:: html + +
+ +.. raw:: html + +
+ +Step 0: Install necessary libraries & grab data files +******************************************************* + +.. code:: ipython3 + + ! git clone https://github.com/predictive-clinical-neuroscience/PCNtoolkit-demo.git + +.. code:: ipython3 + + import os + +.. code:: ipython3 + + # set this path to the git cloned PCNtoolkit-demo repository --> Uncomment whichever line you need for either running on your own computer or on Google Colab. + #os.chdir('/Users/saigerutherford/repos/PCNtoolkit-demo/') # if running on your own computer, use this line (but obvi change the path) + #os.chdir('PCNtoolkit-demo/') # if running on Google Colab, use this line + +.. code:: ipython3 + + ! pip install -r requirements.txt + + +Step 1: Prepare covariate data +******************************** + +For this tutorial we will use data from the `Human Connectome Project +Young Adult +study `__, +`CAMCAN `__, and +`IXI `__ to create a +multi-site dataset. + +Our first step is to prepare and combine the covariate (age & sex) data +from each site. + +.. code:: ipython3 + + import pandas as pd + import numpy as np + import matplotlib.pyplot as plt + import seaborn as sns + import joypy + from sklearn.model_selection import train_test_split + from pcntoolkit.normative import estimate, evaluate + from pcntoolkit.utils import create_bspline_basis, compute_MSLL + +.. code:: ipython3 + + hcp = pd.read_csv('data/HCP1200_age_gender.csv') + cam = pd.read_csv('data/cam_age_gender.csv') + ixi = pd.read_csv('data/IXI_age_gender.csv') + +.. code:: ipython3 + + cam_hcp = pd.merge(hcp, cam, how='outer') + +.. code:: ipython3 + + cov = pd.merge(cam_hcp, ixi, how='outer') + +.. code:: ipython3 + + sns.set(font_scale=1.5, style='darkgrid') + +.. code:: ipython3 + + sns.displot(cov, x="age", hue="site", multiple="stack", height=6) + +.. code:: ipython3 + + cov.groupby(['site']).describe() + +Step 2: Prepare brain data +****************************** + +Next we will format and combine the MRI data. We are using cortical +thickness maps that are created by running recon-all from Freesurfer 6. +We need to merge together the left and right hemisphere text files for +each site, and then combine the different sites into a single dataframe. +We reduce the dimensionality of our data by using ROIs from the +Desikan-Killiany atlas. + +Here is some psuedo-code (run from a terminal in the folder that has all +subject’s recon-all output folders) that was used to extract these ROIs: + +.. code:: ipython3 + + export SUBJECTS_DIR=/path/to/study/freesurfer_data/ + aparcstats2table --subject sub-* --hemi lh --meas thickness --tablefile HCP1200_aparc_lh_thickness.txt + aparcstats2table --subject sub-* --hemi rh --meas thickness --tablefile HCP1200_aparc_rh_thickness.txt + +.. code:: ipython3 + + cam = pd.read_csv('data/CAMCAN_aparc_thickness.csv') + hcpya = pd.read_csv('data/HCP1200_aparc_thickness.csv') + ixi = pd.read_csv('data/IXI_aparc_thickness.csv') + +.. code:: ipython3 + + hcpya_cam = pd.merge(hcpya, cam, how='outer') + +.. code:: ipython3 + + brain_all = pd.merge(ixi, hcpya_cam, how='outer') + +We also want to include the `Euler +number `__ as a +covariate. So we extracted the euler number from each subject’s +recon-all output folder into a text file and we now need to format and +combine these into our brain dataframe. + +.. code:: ipython3 + + hcp_euler = pd.read_csv('data/hcp-ya_euler.csv') + cam_euler = pd.read_csv('data/cam_euler.csv') + ixi_euler = pd.read_csv('data/ixi_euler.csv') + +.. 
code:: ipython3 + + hcp_euler['site'] = 'hcp' + cam_euler['site'] = 'cam' + ixi_euler['site'] = 'ixi' + +.. code:: ipython3 + + hcp_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True) + cam_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True) + ixi_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True) + +.. code:: ipython3 + + hcp_euler.dropna(inplace=True) + cam_euler.dropna(inplace=True) + ixi_euler.dropna(inplace=True) + +.. code:: ipython3 + + hcp_euler['rh_euler'] = hcp_euler['rh_euler'].astype(int) + hcp_euler['lh_euler'] = hcp_euler['lh_euler'].astype(int) + cam_euler['rh_euler'] = cam_euler['rh_euler'].astype(int) + cam_euler['lh_euler'] = cam_euler['lh_euler'].astype(int) + ixi_euler['rh_euler'] = ixi_euler['rh_euler'].astype(int) + ixi_euler['lh_euler'] = ixi_euler['lh_euler'].astype(int) + +.. code:: ipython3 + + hcp_cam_euler = pd.merge(hcp_euler, cam_euler, how='outer') + +.. code:: ipython3 + + df_euler = pd.merge(ixi_euler, hcp_cam_euler, how='outer') + +Finally, we need to center the euler number for each site. The euler +number is very site-specific so in order to use the same exclusion +threshold across sites we need to center the site by subtracting the +site median from all subjects at a site. Then we will take the square +root and multiply by negative one and exclude any subjects with a square +root above 10. This choice of threshold is fairly random. If possible +all of your data should be visually inspected to verify that the data +inclusion is not too strict or too lenient. + +.. code:: ipython3 + + df_euler['avg_euler'] = df_euler[['lh_euler','rh_euler']].mean(axis=1) + +.. code:: ipython3 + + df_euler.groupby(by='site').median() + +.. code:: ipython3 + + df_euler['site_median'] = df_euler['site'] + +.. code:: ipython3 + + df_euler['site_median'] = df_euler['site_median'].replace({'hcp':-43,'cam':-61,'ixi':-56}) + +.. code:: ipython3 + + df_euler['avg_euler_centered'] = df_euler['avg_euler'] - df_euler['site_median'] + +.. code:: ipython3 + + df_euler['avg_euler_centered_neg'] = df_euler['avg_euler_centered']*-1 + +.. code:: ipython3 + + df_euler['avg_euler_centered_neg_sqrt'] = np.sqrt(np.absolute(df_euler['avg_euler_centered_neg'])) + +.. code:: ipython3 + + pd.set_option('display.max_rows', 500) + pd.set_option('display.max_columns', 500) + pd.set_option('display.width', 1000) + #create a color gradent function to be used in the colormap parameter + def color_gradient(x=0.0, start=(0, 0, 0), stop=(1, 1, 1)): + r = np.interp(x, [0, 1], [start[0], stop[0]]) + g = np.interp(x, [0, 1], [start[1], stop[1]]) + b = np.interp(x, [0, 1], [start[2], stop[2]]) + return r, g, b#show the table + #plot the figure + plt.figure(dpi=380) + fig, axes = joypy.joyplot(df_euler, column=['avg_euler_centered_neg_sqrt'], overlap=2.5, by="site", ylim='own', fill=True, figsize=(6,6) + , legend=False, xlabels=True, ylabels=True, colormap=lambda x: color_gradient(x, start=(.08, .45, .8),stop=(.8, .34, .44)) + , alpha=0.6, linewidth=.5, linecolor='w', fade=True) + plt.title('sqrt(-Euler Number), median centered', fontsize=18, color='black', alpha=1) + plt.xlabel('sqrt(-Euler number)', fontsize=14, color='black', alpha=1) + plt.ylabel('Site', fontsize=14, color='black', alpha=1) + plt.show + +.. code:: ipython3 + + brain = pd.merge(df_euler, brain_all, how='inner') + +.. code:: ipython3 + + len(brain) + +.. code:: ipython3 + + brain_good = brain.query('avg_euler_centered_neg_sqrt < 10') + +.. 
code:: ipython3 + + len(brain_good) + +We lose 63 subjects because they have a large euler number. + +Step 3: Combine covariate & cortical thickness dataframes +************************************************************* + +Even though the normative modeling code needs the covariate and features +(cortical thickness) in separate text files, we first need to merge them +together to make sure that we have the same subjects in each file and +that the rows (representing subjects) align. + +.. code:: ipython3 + + # make sure to use how="inner" so that we only include subjects that have data in both the covariate and the cortical thickness files + all_data = pd.merge(brain_good, cov, how='inner') + +Step 4: Format dataframes to run normative models +**************************************************** + +.. code:: ipython3 + + from sklearn.model_selection import train_test_split + +.. code:: ipython3 + + # Remove any subjects that have NaN variables in any of the columns + all_data.dropna(subset=['lh_bankssts_thickness', + 'lh_caudalanteriorcingulate_thickness', + 'lh_caudalmiddlefrontal_thickness', 'lh_cuneus_thickness', + 'lh_entorhinal_thickness', 'lh_fusiform_thickness', + 'lh_inferiorparietal_thickness', 'lh_inferiortemporal_thickness', + 'lh_isthmuscingulate_thickness', 'lh_lateraloccipital_thickness', + 'lh_lateralorbitofrontal_thickness', 'lh_lingual_thickness', + 'lh_medialorbitofrontal_thickness', 'lh_middletemporal_thickness', + 'lh_parahippocampal_thickness', 'lh_paracentral_thickness', + 'lh_parsopercularis_thickness', 'lh_parsorbitalis_thickness', + 'lh_parstriangularis_thickness', 'lh_pericalcarine_thickness', + 'lh_postcentral_thickness', 'lh_posteriorcingulate_thickness', + 'lh_precentral_thickness', 'lh_precuneus_thickness', + 'lh_rostralanteriorcingulate_thickness', + 'lh_rostralmiddlefrontal_thickness', 'lh_superiorfrontal_thickness', + 'lh_superiorparietal_thickness', 'lh_superiortemporal_thickness', + 'lh_supramarginal_thickness', 'lh_frontalpole_thickness', + 'lh_temporalpole_thickness', 'lh_transversetemporal_thickness', + 'lh_insula_thickness', 'lh_MeanThickness_thickness', + 'rh_bankssts_thickness', 'rh_caudalanteriorcingulate_thickness', + 'rh_caudalmiddlefrontal_thickness', 'rh_cuneus_thickness', + 'rh_entorhinal_thickness', 'rh_fusiform_thickness', + 'rh_inferiorparietal_thickness', 'rh_inferiortemporal_thickness', + 'rh_isthmuscingulate_thickness', 'rh_lateraloccipital_thickness', + 'rh_lateralorbitofrontal_thickness', 'rh_lingual_thickness', + 'rh_medialorbitofrontal_thickness', 'rh_middletemporal_thickness', + 'rh_parahippocampal_thickness', 'rh_paracentral_thickness', + 'rh_parsopercularis_thickness', 'rh_parsorbitalis_thickness', + 'rh_parstriangularis_thickness', 'rh_pericalcarine_thickness', + 'rh_postcentral_thickness', 'rh_posteriorcingulate_thickness', + 'rh_precentral_thickness', 'rh_precuneus_thickness', + 'rh_rostralanteriorcingulate_thickness', + 'rh_rostralmiddlefrontal_thickness', 'rh_superiorfrontal_thickness', + 'rh_superiorparietal_thickness', 'rh_superiortemporal_thickness', + 'rh_supramarginal_thickness', 'rh_frontalpole_thickness', + 'rh_temporalpole_thickness', 'rh_transversetemporal_thickness', + 'rh_insula_thickness', 'rh_MeanThickness_thickness','age','sex'], inplace=True) + +Separate the covariate & features into their own dataframes + +.. 
code:: ipython3 + + all_data_features = all_data[['lh_bankssts_thickness', + 'lh_caudalanteriorcingulate_thickness', + 'lh_caudalmiddlefrontal_thickness', 'lh_cuneus_thickness', + 'lh_entorhinal_thickness', 'lh_fusiform_thickness', + 'lh_inferiorparietal_thickness', 'lh_inferiortemporal_thickness', + 'lh_isthmuscingulate_thickness', 'lh_lateraloccipital_thickness', + 'lh_lateralorbitofrontal_thickness', 'lh_lingual_thickness', + 'lh_medialorbitofrontal_thickness', 'lh_middletemporal_thickness', + 'lh_parahippocampal_thickness', 'lh_paracentral_thickness', + 'lh_parsopercularis_thickness', 'lh_parsorbitalis_thickness', + 'lh_parstriangularis_thickness', 'lh_pericalcarine_thickness', + 'lh_postcentral_thickness', 'lh_posteriorcingulate_thickness', + 'lh_precentral_thickness', 'lh_precuneus_thickness', + 'lh_rostralanteriorcingulate_thickness', + 'lh_rostralmiddlefrontal_thickness', 'lh_superiorfrontal_thickness', + 'lh_superiorparietal_thickness', 'lh_superiortemporal_thickness', + 'lh_supramarginal_thickness', 'lh_frontalpole_thickness', + 'lh_temporalpole_thickness', 'lh_transversetemporal_thickness', + 'lh_insula_thickness', 'lh_MeanThickness_thickness', + 'rh_bankssts_thickness', 'rh_caudalanteriorcingulate_thickness', + 'rh_caudalmiddlefrontal_thickness', 'rh_cuneus_thickness', + 'rh_entorhinal_thickness', 'rh_fusiform_thickness', + 'rh_inferiorparietal_thickness', 'rh_inferiortemporal_thickness', + 'rh_isthmuscingulate_thickness', 'rh_lateraloccipital_thickness', + 'rh_lateralorbitofrontal_thickness', 'rh_lingual_thickness', + 'rh_medialorbitofrontal_thickness', 'rh_middletemporal_thickness', + 'rh_parahippocampal_thickness', 'rh_paracentral_thickness', + 'rh_parsopercularis_thickness', 'rh_parsorbitalis_thickness', + 'rh_parstriangularis_thickness', 'rh_pericalcarine_thickness', + 'rh_postcentral_thickness', 'rh_posteriorcingulate_thickness', + 'rh_precentral_thickness', 'rh_precuneus_thickness', + 'rh_rostralanteriorcingulate_thickness', + 'rh_rostralmiddlefrontal_thickness', 'rh_superiorfrontal_thickness', + 'rh_superiorparietal_thickness', 'rh_superiortemporal_thickness', + 'rh_supramarginal_thickness', 'rh_frontalpole_thickness', + 'rh_temporalpole_thickness', 'rh_transversetemporal_thickness', + 'rh_insula_thickness', 'rh_MeanThickness_thickness']] + +.. code:: ipython3 + + all_data_covariates = all_data[['age','sex','site']] + +Right now, the sites are coded in a single column using a string. We +need to instead dummy encode the site variable so that there is a column +for each site and the columns contain binary variables (0/1). Luckily +pandas has a nice built in function, ``pd.get_dummies`` to help us +format the site column this way! + +.. code:: ipython3 + + all_data_covariates = pd.get_dummies(all_data_covariates, columns=['site']) + +.. code:: ipython3 + + all_data['Average_Thickness'] = all_data[['lh_MeanThickness_thickness','rh_MeanThickness_thickness']].mean(axis=1) + +Take a sneak peak to see if there are any super obvious site effects. If +there were, we would see a large separation in the fitted regression +line for each site. + +.. code:: ipython3 + + sns.set_theme(style="darkgrid",font_scale=1.5) + c = sns.lmplot(data=all_data, x="age", y="Average_Thickness", hue="site", height=6) + plt.ylim(1.5, 3.25) + plt.xlim(15, 95) + plt.show() + +Create train/test split +----------------------- + +We will use 80% of the data for training and 20% for testing. 
We +stratify our train/test split using the site variable to make sure that +the train/test sets both contain data from all sites. The model wouldn’t +learn the site effects if all of the data from one site was only in the +test set. + +.. code:: ipython3 + + X_train, X_test, y_train, y_test = train_test_split(all_data_covariates, all_data_features, stratify=all_data['site'], test_size=0.2, random_state=42) + +Verify that your train & test arrays are the same size + +.. code:: ipython3 + + tr_cov_size = X_train.shape + tr_resp_size = y_train.shape + te_cov_size = X_test.shape + te_resp_size = y_test.shape + print("Train covariate size is: ", tr_cov_size) + print("Test covariate size is: ", te_cov_size) + print("Train response size is: ", tr_resp_size) + print("Test response size is: ", te_resp_size) + + +Save out each ROI to its own file: + +We setup the normative model so that for each Y (brain region) we fit a +separate model. While the estimate function in the pcntoolkit can handle +having all of the Y’s in a single text file, for this tutorial we are +going to organize our Y’s so that they are each in their own text file +and directory. + +.. code:: ipython3 + + os.chdir('/Users/saigerutherford/repos/PCNToolkit-demo/') + +.. code:: ipython3 + + cd data/ + +.. code:: ipython3 + + for c in y_train.columns: + y_train[c].to_csv('resp_tr_' + c + '.txt', header=False, index=False) + +.. code:: ipython3 + + X_train.to_csv('cov_tr.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + y_train.to_csv('resp_tr.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + for c in y_test.columns: + y_test[c].to_csv('resp_te_' + c + '.txt', header=False, index=False) + +.. code:: ipython3 + + X_test.to_csv('cov_te.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + y_test.to_csv('resp_te.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + ! if [[ ! -e data/ROI_models/ ]]; then mkdir data/ROI_models; fi + ! if [[ ! -e data/covariate_files/ ]]; then mkdir data/covariate_files; fi + ! if [[ ! -e data/response_files/ ]]; then mkdir data/response_files; fi + +.. code:: ipython3 + + ! for i in `cat data/roi_dir_names`; do cd data/ROI_models; mkdir ${i}; cd ../../; cp resp_tr_${i}.txt data/ROI_models/${i}/resp_tr.txt; cp resp_te_${i}.txt data/ROI_models/${i}/resp_te.txt; cp cov_tr.txt data/ROI_models/${i}/cov_tr.txt; cp cov_te.txt data/ROI_models/${i}/cov_te.txt; done + +.. code:: ipython3 + + ! mv resp_*.txt data/response_files/ + +.. code:: ipython3 + + ! mv cov_t*.txt data/covariate_files/ + +Step 5: Run normative model +****************************** + +.. code:: ipython3 + + # set this path to wherever your ROI_models folder is located (where you copied all of the covariate & response text files to in Step 4) + data_dir = '/Users/saigerutherford/repos/PCNToolkit-demo/data/ROI_models/' + +.. code:: ipython3 + + # Create a list of all the ROIs you want to run a normative model for + roi_ids = ['lh_MeanThickness_thickness', + 'rh_MeanThickness_thickness', + 'lh_bankssts_thickness', + 'lh_caudalanteriorcingulate_thickness', + 'lh_superiorfrontal_thickness', + 'rh_superiorfrontal_thickness'] + +When we split the data into train and test sets, we did not reset the +index. This means that the row numbers in the train/test matrices are +still the same as before splitting the data. 
We will need the test set +row numbers of which subjects belong to which site in order to evaluate +per site performance metrics, so we need to reset the row numbers in the +train/test split matrices. + +.. code:: ipython3 + + x_col_names = ['age', 'sex', 'site_cam', 'site_hcp', 'site_ixi'] + X_train = pd.read_csv('data/covariate_files/cov_tr.txt', sep='\t', header=None, names=x_col_names) + X_test = pd.read_csv('data/covariate_files/cov_te.txt', sep='\t', header=None, names=x_col_names) + y_train = pd.read_csv('data/response_files/resp_tr.txt', sep='\t', header=None) + y_test = pd.read_csv('data/response_files/resp_te.txt', sep='\t', header=None) + +.. code:: ipython3 + + X_train.reset_index(drop=True, inplace=True) + X_test.reset_index(drop=True, inplace=True) + y_train.reset_index(drop=True, inplace=True) + y_test.reset_index(drop=True, inplace=True) + +Extract site indices: + +Get site ids so that we can evaluate the test metrics independently for +each site + +.. code:: ipython3 + + cam_idx = X_test.index[X_test['site_cam' ]== 1].to_list() + hcp_idx = X_test.index[X_test['site_hcp'] == 1].to_list() + ixi_idx = X_test.index[X_test['site_ixi'] == 1].to_list() + + # Save the site indices into a single list + sites = [cam_idx, hcp_idx, ixi_idx] + + # Create a list with sites names to use in evaluating per-site metrics + site_names = ['cam', 'hcp', 'ixi'] + +Basis expansion: + +Now, we set up a B-spline basis set that allows us to perform nonlinear +regression using a linear model. This basis is deliberately chosen to +not to be too flexible so that in can only model relatively slowly +varying trends. To increase the flexibility of the model you can change +the parameterisation (e.g. by adding knot points to the Bspline basis or +increasing the order of the interpolating polynomial). + +Note that in the neuroimaging literature, it is more common to use a +polynomial basis expansion for this. Piecewise polynomials like +B-splines are superior because they do not introduce a global curvature. +See the reference below for further information. + +`Primer on regression +splines `__ + +`Reference for why polynomials are a bad +idea `__ + +.. code:: ipython3 + + # Create a cubic B-spline basis (used for regression) + xmin = 10#16 # xmin & xmax are the boundaries for ages of participants in the dataset + xmax = 95#90 + B = create_bspline_basis(xmin, xmax) + + # create the basis expansion for the covariates for each of the + for roi in roi_ids: + print('Creating basis expansion for ROI:', roi) + roi_dir = os.path.join(data_dir, roi) + os.chdir(roi_dir) + + # create output dir + os.makedirs(os.path.join(roi_dir,'blr'), exist_ok=True) + + # load train & test covariate data matrices + X_tr = np.loadtxt(os.path.join(roi_dir, 'cov_tr.txt')) + X_te = np.loadtxt(os.path.join(roi_dir, 'cov_te.txt')) + + # add intercept column + X_tr = np.concatenate((X_tr, np.ones((X_tr.shape[0],1))), axis=1) + X_te = np.concatenate((X_te, np.ones((X_te.shape[0],1))), axis=1) + np.savetxt(os.path.join(roi_dir, 'cov_int_tr.txt'), X_tr) + np.savetxt(os.path.join(roi_dir, 'cov_int_te.txt'), X_te) + + # create Bspline basis set + Phi = np.array([B(i) for i in X_tr[:,0]]) + Phis = np.array([B(i) for i in X_te[:,0]]) + X_tr = np.concatenate((X_tr, Phi), axis=1) + X_te = np.concatenate((X_te, Phis), axis=1) + np.savetxt(os.path.join(roi_dir, 'cov_bspline_tr.txt'), X_tr) + np.savetxt(os.path.join(roi_dir, 'cov_bspline_te.txt'), X_te) + +Prepare output structures: + +.. 
code:: ipython3 + + # Create pandas dataframes with header names to save out the overall and per-site model evaluation metrics + blr_metrics = pd.DataFrame(columns = ['ROI', 'MSLL', 'EV', 'SMSE', 'RMSE', 'Rho']) + blr_site_metrics = pd.DataFrame(columns = ['ROI', 'site', 'y_mean', 'y_var', 'yhat_mean', 'yhat_var', 'MSLL', 'EV', 'SMSE', 'RMSE', 'Rho']) + +Estimate the normative models: + +In this step, we estimate the normative models one at a time. In +principle we could also do this on the whole data matrix at once +(e.g. with the response variables stored in a n_subjects x +n_brain_measures numpy array). However, doing it this way gives us some +extra flexibility in that it does not require that the subjects are +exactly the same for each of the brain measures. + +This code fragment will loop through each region of interest in the +roi_ids list (set a few code blocks above) using Bayesian linear +regression and evaluate the model on the independent test set. It will +then compute error metrics such as the explained variance, mean +standardized log loss and Pearson correlation between true and predicted +test responses separately for each scanning site. + +We supply the estimate function with a few specific arguments that are +worthy of commenting on: \* alg = ‘blr’ : specifies we should use +Bayesian linear regression \* optimizer = ‘powell’ : use Powell’s +derivative-free optimization method (faster in this case than L-BFGS) \* +savemodel = False : do not write out the final estimated model to disk +\* saveoutput = False : return the outputs directly rather than writing +them to disk \* standardize = False : Do not standardize the covariates +or response variables + +One important consideration is whether or not to standardize. Whilst +this generally only has a minor effect on the final model accuracy, it +has implications for the interpretation of models and how they are +configured. If the covariates and responses are both standardized, the +model will return standardized coefficients. If (as in this case) the +response variables are not standardized, then the scaling both +covariates and responses will be reflected in the estimated +coefficients. Also, under the linear modelling approach employed here, +if the coefficients are unstandardized and do not have a zero mean, it +is necessary to add an intercept column to the design matrix. This is +done in the code block above. + +.. code:: ipython3 + + # Loop through ROIs + for roi in roi_ids: + print('Running ROI:', roi) + roi_dir = os.path.join(data_dir, roi) + os.chdir(roi_dir) + + # configure the covariates to use. 
Change *_bspline_* to *_int_* to + cov_file_tr = os.path.join(roi_dir, 'cov_bspline_tr.txt') + cov_file_te = os.path.join(roi_dir, 'cov_bspline_te.txt') + + # load train & test response files + resp_file_tr = os.path.join(roi_dir, 'resp_tr.txt') + resp_file_te = os.path.join(roi_dir, 'resp_te.txt') + + # run a basic model + yhat_te, s2_te, nm, Z, metrics_te = estimate(cov_file_tr, + resp_file_tr, + testresp=resp_file_te, + testcov=cov_file_te, + alg = 'blr', + optimizer = 'powell', + savemodel = False, + saveoutput = False, + standardize = False) + # display and save metrics + print('EV=', metrics_te['EXPV'][0]) + print('RHO=', metrics_te['Rho'][0]) + print('MSLL=', metrics_te['MSLL'][0]) + blr_metrics.loc[len(blr_metrics)] = [roi, metrics_te['MSLL'][0], metrics_te['EXPV'][0], metrics_te['SMSE'][0], + metrics_te['RMSE'][0], metrics_te['Rho'][0]] + + # Compute metrics per site in test set, save to pandas df + # load true test data + X_te = np.loadtxt(cov_file_te) + y_te = np.loadtxt(resp_file_te) + y_te = y_te[:, np.newaxis] # make sure it is a 2-d array + + # load training data (required to compute the MSLL) + y_tr = np.loadtxt(resp_file_tr) + y_tr = y_tr[:, np.newaxis] + + for num, site in enumerate(sites): + y_mean_te_site = np.array([[np.mean(y_te[site])]]) + y_var_te_site = np.array([[np.var(y_te[site])]]) + yhat_mean_te_site = np.array([[np.mean(yhat_te[site])]]) + yhat_var_te_site = np.array([[np.var(yhat_te[site])]]) + + metrics_te_site = evaluate(y_te[site], yhat_te[site], s2_te[site], y_mean_te_site, y_var_te_site) + + site_name = site_names[num] + blr_site_metrics.loc[len(blr_site_metrics)] = [roi, site_names[num], + y_mean_te_site[0], + y_var_te_site[0], + yhat_mean_te_site[0], + yhat_var_te_site[0], + metrics_te_site['MSLL'][0], + metrics_te_site['EXPV'][0], + metrics_te_site['SMSE'][0], + metrics_te_site['RMSE'][0], + metrics_te_site['Rho'][0]] + +.. code:: ipython3 + + os.chdir(data_dir) + +.. code:: ipython3 + + # Save per site test set metrics variable to CSV file + blr_site_metrics.to_csv('blr_site_metrics.csv', index=False, index_label=None) + +.. code:: ipython3 + + # Save overall test set metrics to CSV file + blr_metrics.to_csv('blr_metrics.csv', index=False, index_label=None) + +Step 6: Interpreting model performance +***************************************** + +Output evaluation metrics definitions + +================= ====================================================================================================== +**key value** **Description** +----------------- ------------------------------------------------------------------------------------------------------ +yhat predictive mean +ys2 predictive variance +nm normative model +Z deviance scores +Rho Pearson correlation between true and predicted responses +pRho parametric p-value for this correlation +RMSE root mean squared error between true/predicted responses +SMSE standardised mean squared error +EV explained variance +MSLL mean standardized log loss `See page 23 `_ +================= ====================================================================================================== + + diff --git a/doc/build/html/_sources/pages/updates.rst.txt b/doc/build/html/_sources/pages/updates.rst.txt new file mode 100644 index 00000000..38b10ae3 --- /dev/null +++ b/doc/build/html/_sources/pages/updates.rst.txt @@ -0,0 +1,7 @@ +.. _updates: + +.. 
title:: List of updates + +Updates +================== + diff --git a/doc/build/html/_static/ajax-loader.gif b/doc/build/html/_static/ajax-loader.gif deleted file mode 100644 index 61faf8ca..00000000 Binary files a/doc/build/html/_static/ajax-loader.gif and /dev/null differ diff --git a/doc/build/html/_static/alabaster.css b/doc/build/html/_static/alabaster.css deleted file mode 100644 index a88ce299..00000000 --- a/doc/build/html/_static/alabaster.css +++ /dev/null @@ -1,693 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; - font-size: 17px; - background-color: #fff; - color: #000; - margin: 0; - padding: 0; -} - - -div.document { - width: 940px; - margin: 30px auto 0 auto; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 220px; -} - -div.sphinxsidebar { - width: 220px; - font-size: 14px; - line-height: 1.5; -} - -hr { - border: 1px solid #B1B4B6; -} - -div.body { - background-color: #fff; - color: #3E4349; - padding: 0 30px 0 30px; -} - -div.body > .section { - text-align: left; -} - -div.footer { - width: 940px; - margin: 20px auto 30px auto; - font-size: 14px; - color: #888; - text-align: right; -} - -div.footer a { - color: #888; -} - -p.caption { - font-family: inherit; - font-size: inherit; -} - - -div.relations { - display: none; -} - - -div.sphinxsidebar a { - color: #444; - text-decoration: none; - border-bottom: 1px dotted #999; -} - -div.sphinxsidebar a:hover { - border-bottom: 1px solid #999; -} - -div.sphinxsidebarwrapper { - padding: 18px 10px; -} - -div.sphinxsidebarwrapper p.logo { - padding: 0; - margin: -10px 0 0 0px; - text-align: center; -} - -div.sphinxsidebarwrapper h1.logo { - margin-top: -10px; - text-align: center; - margin-bottom: 5px; - text-align: left; -} - -div.sphinxsidebarwrapper h1.logo-name { - margin-top: 0px; -} - -div.sphinxsidebarwrapper p.blurb { - margin-top: 0; - font-style: normal; -} - -div.sphinxsidebar h3, -div.sphinxsidebar h4 { - font-family: 'Garamond', 'Georgia', serif; - color: #444; - font-size: 24px; - font-weight: normal; - margin: 0 0 5px 0; - padding: 0; -} - -div.sphinxsidebar h4 { - font-size: 20px; -} - -div.sphinxsidebar h3 a { - color: #444; -} - -div.sphinxsidebar p.logo a, -div.sphinxsidebar h3 a, -div.sphinxsidebar p.logo a:hover, -div.sphinxsidebar h3 a:hover { - border: none; -} - -div.sphinxsidebar p { - color: #555; - margin: 10px 0; -} - -div.sphinxsidebar ul { - margin: 10px 0; - padding: 0; - color: #000; -} - -div.sphinxsidebar ul li.toctree-l1 > a { - font-size: 120%; -} - -div.sphinxsidebar ul li.toctree-l2 > a { - font-size: 110%; -} - -div.sphinxsidebar input { - border: 1px solid #CCC; - font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; - font-size: 1em; -} - -div.sphinxsidebar hr { - border: none; - height: 1px; - color: #AAA; - background: #AAA; - - text-align: left; - margin-left: 0; - width: 50%; -} - -/* -- body styles ----------------------------------------------------------- */ - -a { - color: #004B6B; - text-decoration: underline; -} - -a:hover { - color: #6D4100; - text-decoration: underline; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: 'Garamond', 'Georgia', serif; - 
font-weight: normal; - margin: 30px 0px 10px 0px; - padding: 0; -} - -div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } -div.body h2 { font-size: 180%; } -div.body h3 { font-size: 150%; } -div.body h4 { font-size: 130%; } -div.body h5 { font-size: 100%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #DDD; - padding: 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - color: #444; - background: #EAEAEA; -} - -div.body p, div.body dd, div.body li { - line-height: 1.4em; -} - -div.admonition { - margin: 20px 0px; - padding: 10px 30px; - background-color: #EEE; - border: 1px solid #CCC; -} - -div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { - background-color: ; - border-bottom: 1px solid #fafafa; -} - -dd div.admonition { - margin-left: -60px; - padding-left: 60px; -} - -div.admonition p.admonition-title { - font-family: 'Garamond', 'Georgia', serif; - font-weight: normal; - font-size: 24px; - margin: 0 0 10px 0; - padding: 0; - line-height: 1; -} - -div.admonition p.last { - margin-bottom: 0; -} - -div.highlight { - background-color: #fff; -} - -dt:target, .highlight { - background: #FAF3E8; -} - -div.warning { - background-color: #FCC; - border: 1px solid #FAA; -} - -div.danger { - background-color: #FCC; - border: 1px solid #FAA; - -moz-box-shadow: 2px 2px 4px #D52C2C; - -webkit-box-shadow: 2px 2px 4px #D52C2C; - box-shadow: 2px 2px 4px #D52C2C; -} - -div.error { - background-color: #FCC; - border: 1px solid #FAA; - -moz-box-shadow: 2px 2px 4px #D52C2C; - -webkit-box-shadow: 2px 2px 4px #D52C2C; - box-shadow: 2px 2px 4px #D52C2C; -} - -div.caution { - background-color: #FCC; - border: 1px solid #FAA; -} - -div.attention { - background-color: #FCC; - border: 1px solid #FAA; -} - -div.important { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.note { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.tip { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.hint { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.seealso { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.topic { - background-color: #EEE; -} - -p.admonition-title { - display: inline; -} - -p.admonition-title:after { - content: ":"; -} - -pre, tt, code { - font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; - font-size: 0.9em; -} - -.hll { - background-color: #FFC; - margin: 0 -12px; - padding: 0 12px; - display: block; -} - -img.screenshot { -} - -tt.descname, tt.descclassname, code.descname, code.descclassname { - font-size: 0.95em; -} - -tt.descname, code.descname { - padding-right: 0.08em; -} - -img.screenshot { - -moz-box-shadow: 2px 2px 4px #EEE; - -webkit-box-shadow: 2px 2px 4px #EEE; - box-shadow: 2px 2px 4px #EEE; -} - -table.docutils { - border: 1px solid #888; - -moz-box-shadow: 2px 2px 4px #EEE; - -webkit-box-shadow: 2px 2px 4px #EEE; - box-shadow: 2px 2px 4px #EEE; -} - -table.docutils td, table.docutils th { - border: 1px solid #888; - padding: 0.25em 0.7em; -} - -table.field-list, table.footnote { - border: none; - -moz-box-shadow: none; - -webkit-box-shadow: none; - box-shadow: none; -} - -table.footnote { - margin: 15px 0; - width: 100%; - border: 1px solid #EEE; - background: #FDFDFD; - font-size: 0.9em; -} - -table.footnote + table.footnote { - margin-top: -15px; - border-top: none; -} - -table.field-list th { - padding: 0 0.8em 0 0; -} - -table.field-list td { - padding: 0; -} - -table.field-list p { - margin-bottom: 0.8em; -} - -table.footnote 
td.label { - width: .1px; - padding: 0.3em 0 0.3em 0.5em; -} - -table.footnote td { - padding: 0.3em 0.5em; -} - -dl { - margin: 0; - padding: 0; -} - -dl dd { - margin-left: 30px; -} - -blockquote { - margin: 0 0 0 30px; - padding: 0; -} - -ul, ol { - /* Matches the 30px from the narrow-screen "li > ul" selector below */ - margin: 10px 0 10px 30px; - padding: 0; -} - -pre { - background: #EEE; - padding: 7px 30px; - margin: 15px 0px; - line-height: 1.3em; -} - -div.viewcode-block:target { - background: #ffd; -} - -dl pre, blockquote pre, li pre { - margin-left: 0; - padding-left: 30px; -} - -dl dl pre { - margin-left: -90px; - padding-left: 90px; -} - -tt, code { - background-color: #ecf0f3; - color: #222; - /* padding: 1px 2px; */ -} - -tt.xref, code.xref, a tt { - background-color: #FBFBFB; - border-bottom: 1px solid #fff; -} - -a.reference { - text-decoration: none; - border-bottom: 1px dotted #004B6B; -} - -/* Don't put an underline on images */ -a.image-reference, a.image-reference:hover { - border-bottom: none; -} - -a.reference:hover { - border-bottom: 1px solid #6D4100; -} - -a.footnote-reference { - text-decoration: none; - font-size: 0.7em; - vertical-align: top; - border-bottom: 1px dotted #004B6B; -} - -a.footnote-reference:hover { - border-bottom: 1px solid #6D4100; -} - -a:hover tt, a:hover code { - background: #EEE; -} - - -@media screen and (max-width: 870px) { - - div.sphinxsidebar { - display: none; - } - - div.document { - width: 100%; - - } - - div.documentwrapper { - margin-left: 0; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - } - - div.bodywrapper { - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - margin-left: 0; - } - - ul { - margin-left: 0; - } - - li > ul { - /* Matches the 30px from the "ul, ol" selector above */ - margin-left: 30px; - } - - .document { - width: auto; - } - - .footer { - width: auto; - } - - .bodywrapper { - margin: 0; - } - - .footer { - width: auto; - } - - .github { - display: none; - } - - - -} - - - -@media screen and (max-width: 875px) { - - body { - margin: 0; - padding: 20px 30px; - } - - div.documentwrapper { - float: none; - background: #fff; - } - - div.sphinxsidebar { - display: block; - float: none; - width: 102.5%; - margin: 50px -30px -20px -30px; - padding: 10px 20px; - background: #333; - color: #FFF; - } - - div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, - div.sphinxsidebar h3 a { - color: #fff; - } - - div.sphinxsidebar a { - color: #AAA; - } - - div.sphinxsidebar p.logo { - display: none; - } - - div.document { - width: 100%; - margin: 0; - } - - div.footer { - display: none; - } - - div.bodywrapper { - margin: 0; - } - - div.body { - min-height: 0; - padding: 0; - } - - .rtd_doc_footer { - display: none; - } - - .document { - width: auto; - } - - .footer { - width: auto; - } - - .footer { - width: auto; - } - - .github { - display: none; - } -} - - -/* misc. */ - -.revsys-inline { - display: none!important; -} - -/* Make nested-list/multi-paragraph items look better in Releases changelog - * pages. Without this, docutils' magical list fuckery causes inconsistent - * formatting between different release sub-lists. 
- */ -div#changelog > div.section > ul > li > p:only-child { - margin-bottom: 0; -} - -/* Hide fugly table cell borders in ..bibliography:: directive output */ -table.docutils.citation, table.docutils.citation td, table.docutils.citation th { - border: none; - /* Below needed in some edge cases; if not applied, bottom shadows appear */ - -moz-box-shadow: none; - -webkit-box-shadow: none; - box-shadow: none; -} \ No newline at end of file diff --git a/doc/build/html/_static/alert_info_32.png b/doc/build/html/_static/alert_info_32.png deleted file mode 100644 index ea4d1baf..00000000 Binary files a/doc/build/html/_static/alert_info_32.png and /dev/null differ diff --git a/doc/build/html/_static/alert_warning_32.png b/doc/build/html/_static/alert_warning_32.png deleted file mode 100644 index a687c3dc..00000000 Binary files a/doc/build/html/_static/alert_warning_32.png and /dev/null differ diff --git a/doc/build/html/_static/background_b01.png b/doc/build/html/_static/background_b01.png deleted file mode 100644 index 353f26dd..00000000 Binary files a/doc/build/html/_static/background_b01.png and /dev/null differ diff --git a/doc/build/html/_static/basic.css b/doc/build/html/_static/basic.css index e3640e79..24bc73e7 100644 --- a/doc/build/html/_static/basic.css +++ b/doc/build/html/_static/basic.css @@ -55,7 +55,7 @@ div.sphinxsidebarwrapper { div.sphinxsidebar { float: left; - width: 210px; + width: 230px; margin-left: -100%; font-size: 90%; word-wrap: break-word; diff --git a/doc/build/html/_static/bg-page.png b/doc/build/html/_static/bg-page.png deleted file mode 100644 index fe0a6dc8..00000000 Binary files a/doc/build/html/_static/bg-page.png and /dev/null differ diff --git a/doc/build/html/_static/bizstyle.css b/doc/build/html/_static/bizstyle.css deleted file mode 100644 index def9cedc..00000000 --- a/doc/build/html/_static/bizstyle.css +++ /dev/null @@ -1,506 +0,0 @@ -/* - * bizstyle.css_t - * ~~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- business style theme. - * - * :copyright: Copyright 2011-2014 by Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; - font-size: 14px; - letter-spacing: -0.01em; - line-height: 150%; - text-align: center; - background-color: white; - background-image: url(background_b01.png); - color: black; - padding: 0; - border-right: 1px solid #336699; - border-left: 1px solid #336699; - - margin: 0px 40px 0px 40px; -} - -div.document { - background-color: white; - text-align: left; - background-repeat: repeat-x; - - -moz-box-shadow: 2px 2px 5px #000; - -webkit-box-shadow: 2px 2px 5px #000; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 240px; - border-left: 1px solid #ccc; -} - -div.body { - margin: 0; - padding: 0.5em 20px 20px 20px; -} -div.bodywrapper { - margin: 0 0 0 calc(210px + 30px); -} - -div.related { - font-size: 1em; - - -moz-box-shadow: 2px 2px 5px #000; - -webkit-box-shadow: 2px 2px 5px #000; -} - -div.related ul { - background-color: #336699; - height: 100%; - overflow: hidden; - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; -} - -div.related ul li { - color: white; - margin: 0; - padding: 0; - height: 2em; - float: left; -} - -div.related ul li.right { - float: right; - margin-right: 5px; -} - -div.related ul li a { - margin: 0; - padding: 0 5px 0 5px; - line-height: 1.75em; - color: #fff; -} - -div.related ul li a:hover { - color: #fff; - text-decoration: underline; -} - -div.sphinxsidebarwrapper { - padding: 0; -} - -div.sphinxsidebar { - padding: 0.5em 12px 12px 12px; - width: 210px; - font-size: 1em; - text-align: left; -} - -div.sphinxsidebar h3, div.sphinxsidebar h4 { - margin: 1em 0 0.5em 0; - font-size: 1em; - padding: 0.1em 0 0.1em 0.5em; - color: white; - border: 1px solid #336699; - background-color: #336699; -} - -div.sphinxsidebar h3 a { - color: white; -} - -div.sphinxsidebar ul { - padding-left: 1.5em; - margin-top: 7px; - padding: 0; - line-height: 130%; -} - -div.sphinxsidebar ul ul { - margin-left: 20px; -} - -div.sphinxsidebar input { - border: 1px solid #336699; -} - -div.footer { - background-color: white; - color: #336699; - padding: 3px 8px 3px 0; - clear: both; - font-size: 0.8em; - text-align: right; - border-bottom: 1px solid #336699; - - -moz-box-shadow: 2px 2px 5px #000; - -webkit-box-shadow: 2px 2px 5px #000; -} - -div.footer a { - color: #336699; - text-decoration: underline; -} - -/* -- body styles ----------------------------------------------------------- */ - -p { - margin: 0.8em 0 0.5em 0; -} - -a { - color: #336699; - text-decoration: none; -} - -a:hover { - color: #336699; - text-decoration: underline; -} - -div.body a { - text-decoration: underline; -} - -h1, h2, h3 { - color: #336699; -} - -h1 { - margin: 0; - padding: 0.7em 0 0.3em 0; - font-size: 1.5em; -} - -h2 { - margin: 1.3em 0 0.2em 0; - font-size: 1.35em; - padding-bottom: .5em; - border-bottom: 1px solid #336699; -} - -h3 { - margin: 1em 0 -0.3em 0; - font-size: 1.2em; - padding-bottom: .3em; - border-bottom: 1px solid #CCCCCC; -} - -div.body h1 a, div.body h2 a, div.body h3 a, -div.body h4 a, div.body h5 a, div.body h6 a { - color: black!important; -} - -h1 a.anchor, h2 a.anchor, h3 a.anchor, -h4 a.anchor, h5 a.anchor, h6 a.anchor { - display: none; - margin: 0 0 0 0.3em; - padding: 0 0.2em 0 0.2em; - color: #aaa!important; -} - -h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, -h5:hover 
a.anchor, h6:hover a.anchor { - display: inline; -} - -h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, -h5 a.anchor:hover, h6 a.anchor:hover { - color: #777; - background-color: #eee; -} - -a.headerlink { - color: #c60f0f!important; - font-size: 1em; - margin-left: 6px; - padding: 0 4px 0 4px; - text-decoration: none!important; -} - -a.headerlink:hover { - background-color: #ccc; - color: white!important; -} - -cite, code, tt { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.01em; -} - -code { - background-color: #F2F2F2; - border-bottom: 1px solid #ddd; - color: #333; -} - -code.descname, code.descclassname, code.xref { - border: 0; -} - -hr { - border: 1px solid #abc; - margin: 2em; -} - -a code { - border: 0; - color: #CA7900; -} - -a code:hover { - color: #2491CF; -} - -pre { - background-color: transparent !important; - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.015em; - line-height: 120%; - padding: 0.5em; - border-right: 5px solid #ccc; - border-left: 5px solid #ccc; -} - -pre a { - color: inherit; - text-decoration: underline; -} - -td.linenos pre { - padding: 0.5em 0; -} - -div.quotebar { - background-color: #f8f8f8; - max-width: 250px; - float: right; - padding: 2px 7px; - border: 1px solid #ccc; -} - -div.topic { - background-color: #f8f8f8; -} - -table { - border-collapse: collapse; - margin: 0 -0.5em 0 -0.5em; -} - -table td, table th { - padding: 0.2em 0.5em 0.2em 0.5em; -} - -div.admonition { - font-size: 0.9em; - margin: 1em 0 1em 0; - border: 3px solid #cccccc; - background-color: #f7f7f7; - padding: 0; -} - -div.admonition p { - margin: 0.5em 1em 0.5em 1em; - padding: 0; -} - -div.admonition li p { - margin-left: 0; -} - -div.admonition pre, div.warning pre { - margin: 0; -} - -div.highlight { - margin: 0.4em 1em; -} - -div.admonition p.admonition-title { - margin: 0; - padding: 0.1em 0 0.1em 0.5em; - color: white; - border-bottom: 3px solid #cccccc; - font-weight: bold; - background-color: #165e83; -} - -div.danger { border: 3px solid #f0908d; background-color: #f0cfa0; } -div.error { border: 3px solid #f0908d; background-color: #ede4cd; } -div.warning { border: 3px solid #f8b862; background-color: #f0cfa0; } -div.caution { border: 3px solid #f8b862; background-color: #ede4cd; } -div.attention { border: 3px solid #f8b862; background-color: #f3f3f3; } -div.important { border: 3px solid #f0cfa0; background-color: #ede4cd; } -div.note { border: 3px solid #f0cfa0; background-color: #f3f3f3; } -div.hint { border: 3px solid #bed2c3; background-color: #f3f3f3; } -div.tip { border: 3px solid #bed2c3; background-color: #f3f3f3; } - -div.danger p.admonition-title, div.error p.admonition-title { - background-color: #b7282e; - border-bottom: 3px solid #f0908d; -} - -div.caution p.admonition-title, -div.warning p.admonition-title, -div.attention p.admonition-title { - background-color: #f19072; - border-bottom: 3px solid #f8b862; -} - -div.note p.admonition-title, div.important p.admonition-title { - background-color: #f8b862; - border-bottom: 3px solid #f0cfa0; -} - -div.hint p.admonition-title, div.tip p.admonition-title { - background-color: #7ebea5; - border-bottom: 3px solid #bed2c3; -} - -div.admonition ul, div.admonition ol, -div.warning ul, div.warning ol { - margin: 0.1em 0.5em 0.5em 3em; - padding: 0; -} - -div.versioninfo { - margin: 1em 0 0 0; - border: 1px solid #ccc; - 
background-color: #DDEAF0; - padding: 8px; - line-height: 1.3em; - font-size: 0.9em; -} - -.viewcode-back { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} - -p.versionchanged span.versionmodified { - font-size: 0.9em; - margin-right: 0.2em; - padding: 0.1em; - background-color: #DCE6A0; -} - -dl.field-list > dt { - color: white; - background-color: #82A0BE; -} - -dl.field-list > dd { - background-color: #f7f7f7; -} - -/* -- table styles ---------------------------------------------------------- */ - -table.docutils { - margin: 1em 0; - padding: 0; - border: 1px solid white; - background-color: #f7f7f7; -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 1px solid white; - border-bottom: 1px solid white; -} - -table.docutils td p { - margin-top: 0; - margin-bottom: 0.3em; -} - -table.field-list td, table.field-list th { - border: 0 !important; - word-break: break-word; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - color: white; - text-align: left; - padding-right: 5px; - background-color: #82A0BE; -} - -div.literal-block-wrapper div.code-block-caption { - background-color: #EEE; - border-style: solid; - border-color: #CCC; - border-width: 1px 5px; -} - -/* WIDE DESKTOP STYLE */ -@media only screen and (min-width: 1176px) { -body { - margin: 0 40px 0 40px; -} -} - -/* TABLET STYLE */ -@media only screen and (min-width: 768px) and (max-width: 991px) { -body { - margin: 0 40px 0 40px; -} -} - -/* MOBILE LAYOUT (PORTRAIT/320px) */ -@media only screen and (max-width: 767px) { -body { - margin: 0; -} -div.bodywrapper { - margin: 0; - width: 100%; - border: none; -} -div.sphinxsidebar { - display: none; -} -} - -/* MOBILE LAYOUT (LANDSCAPE/480px) */ -@media only screen and (min-width: 480px) and (max-width: 767px) { -body { - margin: 0 20px 0 20px; -} -} - -/* RETINA OVERRIDES */ -@media -only screen and (-webkit-min-device-pixel-ratio: 2), -only screen and (min-device-pixel-ratio: 2) { -} - -/* -- end ------------------------------------------------------------------- */ \ No newline at end of file diff --git a/doc/build/html/_static/bizstyle.js b/doc/build/html/_static/bizstyle.js deleted file mode 100644 index fc18d101..00000000 --- a/doc/build/html/_static/bizstyle.js +++ /dev/null @@ -1,41 +0,0 @@ -// -// bizstyle.js -// ~~~~~~~~~~~ -// -// Sphinx javascript -- for bizstyle theme. -// -// This theme was created by referring to 'sphinxdoc' -// -// :copyright: Copyright 2012-2014 by Sphinx team, see AUTHORS. -// :license: BSD, see LICENSE for details. 
-// -$(document).ready(function(){ - if (navigator.userAgent.indexOf('iPhone') > 0 || - navigator.userAgent.indexOf('Android') > 0) { - $("li.nav-item-0 a").text("Top"); - } - - $("div.related:first ul li:not(.right) a").slice(1).each(function(i, item){ - if (item.text.length > 20) { - var tmpstr = item.text - $(item).attr("title", tmpstr); - $(item).text(tmpstr.substr(0, 17) + "..."); - } - }); - $("div.related:last ul li:not(.right) a").slice(1).each(function(i, item){ - if (item.text.length > 20) { - var tmpstr = item.text - $(item).attr("title", tmpstr); - $(item).text(tmpstr.substr(0, 17) + "..."); - } - }); -}); - -$(window).resize(function(){ - if ($(window).width() <= 776) { - $("li.nav-item-0 a").text("Top"); - } - else { - $("li.nav-item-0 a").text("Predictive Clinical Neuroscience Toolkit 0.17 documentation"); - } -}); \ No newline at end of file diff --git a/doc/build/html/_static/bullet_orange.png b/doc/build/html/_static/bullet_orange.png deleted file mode 100644 index 1cb8097c..00000000 Binary files a/doc/build/html/_static/bullet_orange.png and /dev/null differ diff --git a/doc/build/html/_static/classic.css b/doc/build/html/_static/classic.css deleted file mode 100644 index 20db95e2..00000000 --- a/doc/build/html/_static/classic.css +++ /dev/null @@ -1,261 +0,0 @@ -/* - * classic.css_t - * ~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- classic theme. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: sans-serif; - font-size: 100%; - background-color: #11303d; - color: #000; - margin: 0; - padding: 0; -} - -div.document { - background-color: #1c4e63; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 230px; -} - -div.body { - background-color: #ffffff; - color: #000000; - padding: 0 20px 30px 20px; -} - -div.footer { - color: #ffffff; - width: 100%; - padding: 9px 0 9px 0; - text-align: center; - font-size: 75%; -} - -div.footer a { - color: #ffffff; - text-decoration: underline; -} - -div.related { - background-color: #133f52; - line-height: 30px; - color: #ffffff; -} - -div.related a { - color: #ffffff; -} - -div.sphinxsidebar { -} - -div.sphinxsidebar h3 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.4em; - font-weight: normal; - margin: 0; - padding: 0; -} - -div.sphinxsidebar h3 a { - color: #ffffff; -} - -div.sphinxsidebar h4 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.3em; - font-weight: normal; - margin: 5px 0 0 0; - padding: 0; -} - -div.sphinxsidebar p { - color: #ffffff; -} - -div.sphinxsidebar p.topless { - margin: 5px 10px 10px 10px; -} - -div.sphinxsidebar ul { - margin: 10px; - padding: 0; - color: #ffffff; -} - -div.sphinxsidebar a { - color: #98dbcc; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - - - -/* -- hyperlink styles ------------------------------------------------------ */ - -a { - color: #355f7c; - text-decoration: none; -} - -a:visited { - color: #355f7c; - text-decoration: none; -} - -a:hover { - text-decoration: underline; -} - - - -/* -- body styles ----------------------------------------------------------- */ - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: 'Trebuchet MS', sans-serif; - background-color: #f2f2f2; 
- font-weight: normal; - color: #20435c; - border-bottom: 1px solid #ccc; - margin: 20px -20px 10px -20px; - padding: 3px 0 3px 10px; -} - -div.body h1 { margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 160%; } -div.body h3 { font-size: 140%; } -div.body h4 { font-size: 120%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #c60f0f; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - background-color: #c60f0f; - color: white; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - text-align: justify; - line-height: 130%; -} - -div.admonition p.admonition-title + p { - display: inline; -} - -div.admonition p { - margin-bottom: 5px; -} - -div.admonition pre { - margin-bottom: 5px; -} - -div.admonition ul, div.admonition ol { - margin-bottom: 5px; -} - -div.note { - background-color: #eee; - border: 1px solid #ccc; -} - -div.seealso { - background-color: #ffc; - border: 1px solid #ff6; -} - -div.topic { - background-color: #eee; -} - -div.warning { - background-color: #ffe4e4; - border: 1px solid #f66; -} - -p.admonition-title { - display: inline; -} - -p.admonition-title:after { - content: ":"; -} - -pre { - padding: 5px; - background-color: #eeffcc; - color: #333333; - line-height: 120%; - border: 1px solid #ac9; - border-left: none; - border-right: none; -} - -code { - background-color: #ecf0f3; - padding: 0 1px 0 1px; - font-size: 0.95em; -} - -th { - background-color: #ede; -} - -.warning code { - background: #efc2c2; -} - -.note code { - background: #d6d6d6; -} - -.viewcode-back { - font-family: sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} - -div.code-block-caption { - color: #efefef; - background-color: #1c4e63; -} \ No newline at end of file diff --git a/doc/build/html/_static/comment-bright.png b/doc/build/html/_static/comment-bright.png deleted file mode 100644 index 15e27edb..00000000 Binary files a/doc/build/html/_static/comment-bright.png and /dev/null differ diff --git a/doc/build/html/_static/comment-close.png b/doc/build/html/_static/comment-close.png deleted file mode 100644 index 4d91bcf5..00000000 Binary files a/doc/build/html/_static/comment-close.png and /dev/null differ diff --git a/doc/build/html/_static/comment.png b/doc/build/html/_static/comment.png deleted file mode 100644 index dfbc0cbd..00000000 Binary files a/doc/build/html/_static/comment.png and /dev/null differ diff --git a/doc/build/html/_static/contents.png b/doc/build/html/_static/contents.png deleted file mode 100644 index 6c59aa1f..00000000 Binary files a/doc/build/html/_static/contents.png and /dev/null differ diff --git a/doc/build/html/_static/css/badge_only.css b/doc/build/html/_static/css/badge_only.css new file mode 100644 index 00000000..e380325b --- /dev/null +++ b/doc/build/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) 
format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/doc/build/html/_static/css/fonts/Roboto-Slab-Bold.woff b/doc/build/html/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 00000000..6cb60000 Binary files /dev/null and b/doc/build/html/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/doc/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/doc/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 00000000..7059e231 Binary files /dev/null and b/doc/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/doc/build/html/_static/css/fonts/Roboto-Slab-Regular.woff b/doc/build/html/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 00000000..f815f63f Binary files /dev/null and b/doc/build/html/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git 
a/doc/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/doc/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 00000000..f2c76e5b Binary files /dev/null and b/doc/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/doc/build/html/_static/css/fonts/fontawesome-webfont.eot b/doc/build/html/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 00000000..e9f60ca9 Binary files /dev/null and b/doc/build/html/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/doc/build/html/_static/css/fonts/fontawesome-webfont.svg b/doc/build/html/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 00000000..855c845e --- /dev/null +++ b/doc/build/html/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/build/html/_static/css/fonts/fontawesome-webfont.ttf b/doc/build/html/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 00000000..35acda2f Binary files /dev/null and b/doc/build/html/_static/css/fonts/fontawesome-webfont.ttf differ diff --git a/doc/build/html/_static/css/fonts/fontawesome-webfont.woff b/doc/build/html/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 00000000..400014a4 Binary files /dev/null and b/doc/build/html/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/doc/build/html/_static/css/fonts/fontawesome-webfont.woff2 b/doc/build/html/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 00000000..4d13fc60 Binary files /dev/null and b/doc/build/html/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git a/doc/build/html/_static/css/fonts/lato-bold-italic.woff b/doc/build/html/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 00000000..88ad05b9 Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-bold-italic.woff differ diff --git a/doc/build/html/_static/css/fonts/lato-bold-italic.woff2 
b/doc/build/html/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 00000000..c4e3d804 Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/doc/build/html/_static/css/fonts/lato-bold.woff b/doc/build/html/_static/css/fonts/lato-bold.woff new file mode 100644 index 00000000..c6dff51f Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-bold.woff differ diff --git a/doc/build/html/_static/css/fonts/lato-bold.woff2 b/doc/build/html/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 00000000..bb195043 Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-bold.woff2 differ diff --git a/doc/build/html/_static/css/fonts/lato-normal-italic.woff b/doc/build/html/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 00000000..76114bc0 Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-normal-italic.woff differ diff --git a/doc/build/html/_static/css/fonts/lato-normal-italic.woff2 b/doc/build/html/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 00000000..3404f37e Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/doc/build/html/_static/css/fonts/lato-normal.woff b/doc/build/html/_static/css/fonts/lato-normal.woff new file mode 100644 index 00000000..ae1307ff Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-normal.woff differ diff --git a/doc/build/html/_static/css/fonts/lato-normal.woff2 b/doc/build/html/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 00000000..3bf98433 Binary files /dev/null and b/doc/build/html/_static/css/fonts/lato-normal.woff2 differ diff --git a/doc/build/html/_static/css/functions.css b/doc/build/html/_static/css/functions.css new file mode 100644 index 00000000..574685be --- /dev/null +++ b/doc/build/html/_static/css/functions.css @@ -0,0 +1,5 @@ +.function { + border-bottom: 3px solid #d0d0d0; + padding-bottom: 10px; + padding-top: 10px; +} \ No newline at end of file diff --git a/doc/build/html/_static/css/pcntoolkit.css b/doc/build/html/_static/css/pcntoolkit.css new file mode 100644 index 00000000..0231067a --- /dev/null +++ b/doc/build/html/_static/css/pcntoolkit.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier 
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li span.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content 
.fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li span.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li span.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li span.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, 
mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{co
ntent:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{co
ntent:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.
fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""
}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{cont
ent:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-l
izard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{c
ontent:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li span.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p.caption .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.btn .wy-menu-vertical li span.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p.caption .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.nav .wy-menu-vertical li span.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p.caption .btn .headerlink,.rst-content p.caption .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav 
.headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 
.fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li span.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 
.btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#F8F8F8}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#404040;background:#E1E1E1;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content 
.wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e5e9f6}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content 
.wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#1E90FF}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#e5e9f6}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#e5e9f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content 
.wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#1E90FF}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#9fefef}.wy-tray-container li.wy-tray-item-info{background:#1E90FF}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid 
rgba(0,0,0,.1);background-color:#9fefef;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#1E90FF;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#1E90FF!important}.btn-info:hover{background-color:#1E90FF!important}.btn-neutral{background-color:#e5e9f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#9fefef!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#1E90FF;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#1E90FF!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#ffffff;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#1E90FF;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#ffffff;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#1E90FF;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid 
#f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group 
.wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin 
dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #1E90FF}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#e5e9f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#9fefef}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error 
input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#9fefef}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#1E90FF}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content 
table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#e5e9f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#1E90FF;text-decoration:none;cursor:pointer}a:hover{color:#1E90FF}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica 
Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#1E90FF!important}a.wy-text-info:hover{color:#1E90FF!important}.wy-text-success{color:#9fefef!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #1E90FF;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e5e9f6;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol li,.rst-content ol.arabic li,.wy-plain-list-decimal li,article ol li{list-style:decimal;margin-left:24px}.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content ol.arabic li p:last-child,.rst-content ol.arabic li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.rst-content .wy-breadcrumbs li tt,.wy-breadcrumbs li 
.rst-content tt,.wy-breadcrumbs li code{padding:5px;border:none;background:none}.rst-content .wy-breadcrumbs li tt.literal,.wy-breadcrumbs li .rst-content tt.literal,.wy-breadcrumbs li code.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#1E90FF;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#ffffff;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#ffffff}.wy-menu-vertical li.current>a:hover span.toctree-expand,.wy-menu-vertical li.on a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical 
.toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#d9d9d9}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:93%;color:#1E90FF}.wy-menu-vertical a:hover{background-color:hsla(0,0%,100%,.8);cursor:pointer}.wy-menu-vertical a:hover 
span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#1E90FF;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#1E90FF;text-align:center;color:#ffffff}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#1E90FF}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#1E90FF;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#ffffff;font-size:168%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#1E90FF}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#1E90FF;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#ffffff}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#e7e5f6;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#1E90FF;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#1E90FF;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#ffffff;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid 
#e5e9f6;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#ffffff}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#ffffff}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#ffffff;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#1E90FF;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#9fefef;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand{color:#ffffff}.rst-versions 
.rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#ffffff}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #ffffff;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp{user-select:none;pointer-events:none}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content .code-block-caption .headerlink:after,.rst-content .toctree-wrapper>p.caption .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 
.headerlink:after,.rst-content h6 .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content .code-block-caption:hover .headerlink:after,.rst-content .toctree-wrapper>p.caption:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e5e9f6}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e5e9f6;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .hlist{width:100%}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl dt span.classifier:before{content:" : "}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list 
kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e5e9f6}html.writer-html5 .rst-content table.docutils th{border:1px solid #e5e9f6}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#1E90FF}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7e5f6;color:#1E90FF;border-top:3px solid #1E90FF;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#1E90FF}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code,html.writer-html4 .rst-content dl:not(.docutils) tt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#ef9fe4;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid 
#1E90FF;background:#e5e9f6;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} pre.literal-block{border:1px solid #ffffff;white-space:pre;margin:1px 0 24px 0;padding:12px 12px;overflow-x:auto;background:#e7e5f6;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.5;display:block;color:#404040} \ No newline at end of file diff --git a/doc/build/html/_static/css/pcntoolkit_nomaxwidth.css b/doc/build/html/_static/css/pcntoolkit_nomaxwidth.css new file mode 100644 index 00000000..fef75932 --- /dev/null +++ b/doc/build/html/_static/css/pcntoolkit_nomaxwidth.css @@ -0,0 +1,8 @@ + +.wy-nav-content { + max-width: none; +} + + +@media screen and (max-width:768px){.tablet-hide{display:none}} +@media screen and (max-width:480px){.mobile-hide{display:none}} diff --git a/doc/build/html/_static/css/pcntoolkit_tabs.css b/doc/build/html/_static/css/pcntoolkit_tabs.css new file mode 100644 index 00000000..db063843 --- /dev/null +++ b/doc/build/html/_static/css/pcntoolkit_tabs.css @@ -0,0 +1,44 @@ +.sphinx-tabs { + margin-bottom: 2em; +} + +.sphinx-tabs:last-child { + margin-bottom: 1em; +} + +.sphinx-tabs .sphinx-menu .item p { + margin: 0; +} + +.sphinx-tabs .sphinx-menu a.item { + color: #1E90FF !important; +} + +.sphinx-tabs .sphinx-menu { + border-bottom-color: #1E90FF !important; + display: flex; + flex-direction: row; + flex-wrap: wrap; +} + +.sphinx-tabs .sphinx-menu a.active.item { + border-color: #1E90FF !important; +} + +.sphinx-tab { + border-color: #1E90FF !important; + box-sizing: border-box; +} + +.sphinx-tab.tab.active { + margin-bottom: 0; +} + +/* Code tabs don't need the code-block border */ +.code-tab.tab { + padding: 0.4em !important; +} + +.code-tab.tab div[class^='highlight'] { + border: none; +} diff --git 
a/doc/build/html/_static/css/theme.css b/doc/build/html/_static/css/theme.css new file mode 100644 index 00000000..8cd4f101 --- /dev/null +++ b/doc/build/html/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content 
.tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li span.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid 
#eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li span.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li span.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li span.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li span.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 
1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning 
.wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown 
.caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-elli
psis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li 
span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-
at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripad
visor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:bef
ore,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li span.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a 
.headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li span.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p.caption .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.btn .wy-menu-vertical li span.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p.caption .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.nav .wy-menu-vertical li span.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p.caption .btn .headerlink,.rst-content p.caption .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.wy-menu-vertical li.on a 
.nav span.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav 
.fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li span.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li 
span.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso 
.wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint 
.admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content 
.wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 
rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked 
select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group 
.wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto 
#129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error 
input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table 
caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica 
Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol li,.rst-content ol.arabic li,.wy-plain-list-decimal li,article ol li{list-style:decimal;margin-left:24px}.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content ol.arabic li p:last-child,.rst-content ol.arabic li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.rst-content .wy-breadcrumbs li tt,.wy-breadcrumbs li 
.rst-content tt,.wy-breadcrumbs li code{padding:5px;border:none;background:none}.rst-content .wy-breadcrumbs li tt.literal,.wy-breadcrumbs li .rst-content tt.literal,.wy-breadcrumbs li code.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover span.toctree-expand,.wy-menu-vertical li.on a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical 
.toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover 
span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid 
#e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand{color:#fcfcfc}.rst-versions 
.rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp{user-select:none;pointer-events:none}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content .code-block-caption .headerlink:after,.rst-content .toctree-wrapper>p.caption .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 
.headerlink:after,.rst-content h6 .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content .code-block-caption:hover .headerlink:after,.rst-content .toctree-wrapper>p.caption:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .hlist{width:100%}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl dt span.classifier:before{content:" : "}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list 
kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code,html.writer-html4 .rst-content dl:not(.docutils) tt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid 
#7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/doc/build/html/_static/css3-mediaqueries.js b/doc/build/html/_static/css3-mediaqueries.js deleted file mode 100644 index 59735f59..00000000 --- a/doc/build/html/_static/css3-mediaqueries.js +++ /dev/null @@ -1 +0,0 @@ -if(typeof Object.create!=="function"){Object.create=function(e){function t(){}t.prototype=e;return new t}}var ua={toString:function(){return navigator.userAgent},test:function(e){return this.toString().toLowerCase().indexOf(e.toLowerCase())>-1}};ua.version=(ua.toString().toLowerCase().match(/[\s\S]+(?:rv|it|ra|ie)[\/: ]([\d.]+)/)||[])[1];ua.webkit=ua.test("webkit");ua.gecko=ua.test("gecko")&&!ua.webkit;ua.opera=ua.test("opera");ua.ie=ua.test("msie")&&!ua.opera;ua.ie6=ua.ie&&document.compatMode&&typeof document.documentElement.style.maxHeight==="undefined";ua.ie7=ua.ie&&document.documentElement&&typeof document.documentElement.style.maxHeight!=="undefined"&&typeof XDomainRequest==="undefined";ua.ie8=ua.ie&&typeof XDomainRequest!=="undefined";var domReady=function(){var e=[];var t=function(){if(!arguments.callee.done){arguments.callee.done=true;for(var t=0;t=200&&r.status<300||r.status===304||navigator.userAgent.indexOf("Safari")>-1&&typeof r.status==="undefined"){t(r.responseText)}else{n()}document.documentElement.style.cursor="";r=null}};r.send("")};var l=function(t){t=t.replace(e.REDUNDANT_COMPONENTS,"");t=t.replace(e.REDUNDANT_WHITESPACE,"$1");t=t.replace(e.WHITESPACE_IN_PARENTHESES,"($1)");t=t.replace(e.MORE_WHITESPACE," ");t=t.replace(e.FINAL_SEMICOLONS,"}");return t};var c={stylesheet:function(t){var n={};var r=[],i=[],s=[],o=[];var u=t.cssHelperText;var a=t.getAttribute("media");if(a){var f=a.toLowerCase().split(",")}else{var f=["all"]}for(var 
l=0;l-1&&a.href&&a.href.length!==0&&!a.disabled){r[r.length]=a}}if(r.length>0){var c=0;var d=function(){c++;if(c===r.length){i()}};var v=function(t){var n=t.href;f(n,function(r){r=l(r).replace(e.RELATIVE_URLS,"url("+n.substring(0,n.lastIndexOf("/"))+"/$1)");t.cssHelperText=r;d()},d)};for(u=0;u0){r.setAttribute("media",t.join(","))}document.getElementsByTagName("head")[0].appendChild(r);if(r.styleSheet){r.styleSheet.cssText=e}else{r.appendChild(document.createTextNode(e))}r.addedWithCssHelper=true;if(typeof n==="undefined"||n===true){cssHelper.parsed(function(t){var n=p(r,e);for(var i in n){if(n.hasOwnProperty(i)){g(i,n[i])}}a("newStyleParsed",r)})}else{r.parsingDisallowed=true}return r},removeStyle:function(e){return e.parentNode.removeChild(e)},parsed:function(e){if(n){s(e)}else{if(typeof t!=="undefined"){if(typeof e==="function"){e(t)}}else{s(e);d()}}},stylesheets:function(e){cssHelper.parsed(function(t){e(m.stylesheets||y("stylesheets"))})},mediaQueryLists:function(e){cssHelper.parsed(function(t){e(m.mediaQueryLists||y("mediaQueryLists"))})},rules:function(e){cssHelper.parsed(function(t){e(m.rules||y("rules"))})},selectors:function(e){cssHelper.parsed(function(t){e(m.selectors||y("selectors"))})},declarations:function(e){cssHelper.parsed(function(t){e(m.declarations||y("declarations"))})},properties:function(e){cssHelper.parsed(function(t){e(m.properties||y("properties"))})},broadcast:a,addListener:function(e,t){if(typeof t==="function"){if(!u[e]){u[e]={listeners:[]}}u[e].listeners[u[e].listeners.length]=t}},removeListener:function(e,t){if(typeof t==="function"&&u[e]){var n=u[e].listeners;for(var r=0;r=a||s&&l0}}else if("device-height"===e.substring(r-13,r)){c=screen.height;if(t!==null){if(u==="length"){return i&&c>=a||s&&c0}}else if("width"===e.substring(r-5,r)){l=document.documentElement.clientWidth||document.body.clientWidth;if(t!==null){if(u==="length"){return i&&l>=a||s&&l0}}else if("height"===e.substring(r-6,r)){c=document.documentElement.clientHeight||document.body.clientHeight;if(t!==null){if(u==="length"){return i&&c>=a||s&&c0}}else if("device-aspect-ratio"===e.substring(r-19,r)){return u==="aspect-ratio"&&screen.width*a[1]===screen.height*a[0]}else if("color-index"===e.substring(r-11,r)){var h=Math.pow(2,screen.colorDepth);if(t!==null){if(u==="absolute"){return i&&h>=a||s&&h0}}else if("color"===e.substring(r-5,r)){var p=screen.colorDepth;if(t!==null){if(u==="absolute"){return i&&p>=a||s&&p0}}else if("resolution"===e.substring(r-10,r)){var d;if(f==="dpcm"){d=o("1cm")}else{d=o("1in")}if(t!==null){if(u==="resolution"){return i&&d>=a||s&&d0}}else{return false}};var a=function(e){var t=e.getValid();var n=e.getExpressions();var r=n.length;if(r>0){for(var i=0;i0){u=false;for(var f=0;f0){l[c++]=","}l[c++]=h}}if(l.length>0){r[r.length]=cssHelper.addStyle("@media "+l.join("")+"{"+e.getCssText()+"}",t,false)}};var l=function(e,t){for(var n=0;n0}}var o=[],u=[];for(var f in i){if(i.hasOwnProperty(f)){o[o.length]=f;if(i[f]){u[u.length]=f}if(f==="all"){n=true}}}if(u.length>0){r[r.length]=cssHelper.addStyle(e.getCssText(),u,false)}var c=e.getMediaQueryLists();if(n){l(c)}else{l(c,o)}};var h=function(e){for(var t=0;td||Math.abs(s-t)>d){e=n;t=s;clearTimeout(r);r=setTimeout(function(){if(!i()){p()}else{cssHelper.broadcast("cssMediaQueriesTested")}},500)}};window.onresize=function(){var e=window.onresize||function(){};return function(){e();s()}}()};var m=document.documentElement;m.style.marginLeft="-32767px";setTimeout(function(){m.style.marginLeft=""},5e3);return 
function(){if(!i()){cssHelper.addListener("newStyleParsed",function(e){c(e.cssHelperParsed.stylesheet)});cssHelper.addListener("cssMediaQueriesTested",function(){if(ua.ie){m.style.width="1px"}setTimeout(function(){m.style.width="";m.style.marginLeft=""},0);cssHelper.removeListener("cssMediaQueriesTested",arguments.callee)});s();p()}else{m.style.marginLeft=""}v()}}());try{document.execCommand("BackgroundImageCache",false,true)}catch(e){} diff --git a/doc/build/html/_static/css3-mediaqueries_src.js b/doc/build/html/_static/css3-mediaqueries_src.js deleted file mode 100644 index 78786202..00000000 --- a/doc/build/html/_static/css3-mediaqueries_src.js +++ /dev/null @@ -1,1104 +0,0 @@ -/* -css3-mediaqueries.js - CSS Helper and CSS3 Media Queries Enabler - -author: Wouter van der Graaf -version: 1.0 (20110330) -license: MIT -website: http://code.google.com/p/css3-mediaqueries-js/ - -W3C spec: http://www.w3.org/TR/css3-mediaqueries/ - -Note: use of embedded ",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var 
f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/doc/build/html/_static/js/html5shiv.min.js b/doc/build/html/_static/js/html5shiv.min.js new file mode 100644 index 00000000..cd1c674f --- /dev/null +++ b/doc/build/html/_static/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var 
d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/doc/build/html/_static/js/modernizr.min.js b/doc/build/html/_static/js/modernizr.min.js new file mode 100644 index 00000000..f65d4797 --- /dev/null +++ b/doc/build/html/_static/js/modernizr.min.js @@ -0,0 +1,4 @@ +/* Modernizr 2.6.2 (Custom Build) | MIT & BSD + * Build: http://modernizr.com/download/#-fontface-backgroundsize-borderimage-borderradius-boxshadow-flexbox-hsla-multiplebgs-opacity-rgba-textshadow-cssanimations-csscolumns-generatedcontent-cssgradients-cssreflections-csstransforms-csstransforms3d-csstransitions-applicationcache-canvas-canvastext-draganddrop-hashchange-history-audio-video-indexeddb-input-inputtypes-localstorage-postmessage-sessionstorage-websockets-websqldatabase-webworkers-geolocation-inlinesvg-smil-svg-svgclippaths-touch-webgl-shiv-mq-cssclasses-addtest-prefixed-teststyles-testprop-testallprops-hasevent-prefixes-domprefixes-load + */ +;window.Modernizr=function(a,b,c){function D(a){j.cssText=a}function E(a,b){return D(n.join(a+";")+(b||""))}function F(a,b){return typeof a===b}function G(a,b){return!!~(""+a).indexOf(b)}function H(a,b){for(var d in a){var e=a[d];if(!G(e,"-")&&j[e]!==c)return b=="pfx"?e:!0}return!1}function I(a,b,d){for(var e in a){var f=b[a[e]];if(f!==c)return d===!1?a[e]:F(f,"function")?f.bind(d||b):f}return!1}function J(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+p.join(d+" ")+d).split(" ");return F(b,"string")||F(b,"undefined")?H(e,b):(e=(a+" "+q.join(d+" ")+d).split(" "),I(e,b,c))}function K(){e.input=function(c){for(var d=0,e=c.length;d',a,""].join(""),l.id=h,(m?l:n).innerHTML+=f,n.appendChild(l),m||(n.style.background="",n.style.overflow="hidden",k=g.style.overflow,g.style.overflow="hidden",g.appendChild(n)),i=c(l,a),m?l.parentNode.removeChild(l):(n.parentNode.removeChild(n),g.style.overflow=k),!!i},z=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return y("@media "+b+" { #"+h+" { position: absolute; } }",function(b){d=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle)["position"]=="absolute"}),d},A=function(){function d(d,e){e=e||b.createElement(a[d]||"div"),d="on"+d;var f=d in e;return 
f||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(d,""),f=F(e[d],"function"),F(e[d],"undefined")||(e[d]=c),e.removeAttribute(d))),e=null,f}var a={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return d}(),B={}.hasOwnProperty,C;!F(B,"undefined")&&!F(B.call,"undefined")?C=function(a,b){return B.call(a,b)}:C=function(a,b){return b in a&&F(a.constructor.prototype[b],"undefined")},Function.prototype.bind||(Function.prototype.bind=function(b){var c=this;if(typeof c!="function")throw new TypeError;var d=w.call(arguments,1),e=function(){if(this instanceof e){var a=function(){};a.prototype=c.prototype;var f=new a,g=c.apply(f,d.concat(w.call(arguments)));return Object(g)===g?g:f}return c.apply(b,d.concat(w.call(arguments)))};return e}),s.flexbox=function(){return J("flexWrap")},s.canvas=function(){var a=b.createElement("canvas");return!!a.getContext&&!!a.getContext("2d")},s.canvastext=function(){return!!e.canvas&&!!F(b.createElement("canvas").getContext("2d").fillText,"function")},s.webgl=function(){return!!a.WebGLRenderingContext},s.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:y(["@media (",n.join("touch-enabled),("),h,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=a.offsetTop===9}),c},s.geolocation=function(){return"geolocation"in navigator},s.postmessage=function(){return!!a.postMessage},s.websqldatabase=function(){return!!a.openDatabase},s.indexedDB=function(){return!!J("indexedDB",a)},s.hashchange=function(){return A("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},s.history=function(){return!!a.history&&!!history.pushState},s.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},s.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},s.rgba=function(){return D("background-color:rgba(150,255,150,.5)"),G(j.backgroundColor,"rgba")},s.hsla=function(){return D("background-color:hsla(120,40%,100%,.5)"),G(j.backgroundColor,"rgba")||G(j.backgroundColor,"hsla")},s.multiplebgs=function(){return D("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(j.background)},s.backgroundsize=function(){return J("backgroundSize")},s.borderimage=function(){return J("borderImage")},s.borderradius=function(){return J("borderRadius")},s.boxshadow=function(){return J("boxShadow")},s.textshadow=function(){return b.createElement("div").style.textShadow===""},s.opacity=function(){return E("opacity:.55"),/^0.55$/.test(j.opacity)},s.cssanimations=function(){return J("animationName")},s.csscolumns=function(){return J("columnCount")},s.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return D((a+"-webkit- ".split(" ").join(b+a)+n.join(c+a)).slice(0,-a.length)),G(j.backgroundImage,"gradient")},s.cssreflections=function(){return J("boxReflect")},s.csstransforms=function(){return!!J("transform")},s.csstransforms3d=function(){var a=!!J("perspective");return a&&"webkitPerspective"in g.style&&y("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=b.offsetLeft===9&&b.offsetHeight===3}),a},s.csstransitions=function(){return J("transition")},s.fontface=function(){var a;return y('@font-face {font-family:"font";src:url("https://")}',function(c,d){var 
e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&g.indexOf(d.split(" ")[0])===0}),a},s.generatedcontent=function(){var a;return y(["#",h,"{font:0/0 a}#",h,':after{content:"',l,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},s.video=function(){var a=b.createElement("video"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,"")}catch(d){}return c},s.audio=function(){var a=b.createElement("audio"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,"")}catch(d){}return c},s.localstorage=function(){try{return localStorage.setItem(h,h),localStorage.removeItem(h),!0}catch(a){return!1}},s.sessionstorage=function(){try{return sessionStorage.setItem(h,h),sessionStorage.removeItem(h),!0}catch(a){return!1}},s.webworkers=function(){return!!a.Worker},s.applicationcache=function(){return!!a.applicationCache},s.svg=function(){return!!b.createElementNS&&!!b.createElementNS(r.svg,"svg").createSVGRect},s.inlinesvg=function(){var a=b.createElement("div");return a.innerHTML="",(a.firstChild&&a.firstChild.namespaceURI)==r.svg},s.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(m.call(b.createElementNS(r.svg,"animate")))},s.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(m.call(b.createElementNS(r.svg,"clipPath")))};for(var L in s)C(s,L)&&(x=L.toLowerCase(),e[x]=s[L](),v.push((e[x]?"":"no-")+x));return e.input||K(),e.addTest=function(a,b){if(typeof a=="object")for(var d in a)C(a,d)&&e.addTest(d,a[d]);else{a=a.toLowerCase();if(e[a]!==c)return e;b=typeof b=="function"?b():b,typeof f!="undefined"&&f&&(g.className+=" "+(b?"":"no-")+a),e[a]=b}return e},D(""),i=k=null,function(a,b){function k(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function l(){var a=r.elements;return typeof a=="string"?a.split(" "):a}function m(a){var b=i[a[g]];return b||(b={},h++,a[g]=h,i[h]=b),b}function n(a,c,f){c||(c=b);if(j)return c.createElement(a);f||(f=m(c));var g;return f.cache[a]?g=f.cache[a].cloneNode():e.test(a)?g=(f.cache[a]=f.createElem(a)).cloneNode():g=f.createElem(a),g.canHaveChildren&&!d.test(a)?f.frag.appendChild(g):g}function o(a,c){a||(a=b);if(j)return a.createDocumentFragment();c=c||m(a);var d=c.frag.cloneNode(),e=0,f=l(),g=f.length;for(;e",f="hidden"in a,j=a.childNodes.length==1||function(){b.createElement("a");var a=b.createDocumentFragment();return typeof a.cloneNode=="undefined"||typeof a.createDocumentFragment=="undefined"||typeof a.createElement=="undefined"}()}catch(c){f=!0,j=!0}})();var r={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time 
video",shivCSS:c.shivCSS!==!1,supportsUnknownElements:j,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:q,createElement:n,createDocumentFragment:o};a.html5=r,q(b)}(this,b),e._version=d,e._prefixes=n,e._domPrefixes=q,e._cssomPrefixes=p,e.mq=z,e.hasEvent=A,e.testProp=function(a){return H([a])},e.testAllProps=J,e.testStyles=y,e.prefixed=function(a,b,c){return b?J(a,b,c):J(a,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(f?" js "+v.join(" "):""),e}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==o.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=p.shift();q=1,a?a.t?m(function(){("c"==a.t?B.injectCss:B.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):q=0}function i(a,c,d,e,f,i,j){function k(b){if(!o&&g(l.readyState)&&(u.r=o=1,!q&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&m(function(){t.removeChild(l)},50);for(var d in y[c])y[c].hasOwnProperty(d)&&y[c][d].onload()}}var j=j||B.errorTimeout,l=b.createElement(a),o=0,r=0,u={t:d,s:c,e:f,a:i,x:j};1===y[c]&&(r=1,y[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,r)},p.splice(e,0,u),"img"!=a&&(r||2===y[c]?(t.insertBefore(l,s?null:n),m(k,j)):y[c].push(l))}function j(a,b,c,d,f){return q=0,b=b||"j",e(a)?i("c"==b?v:u,a,b,this.i++,c,d,f):(p.splice(this.i++,0,a),1==p.length&&h()),this}function k(){var a=B;return a.loader={load:j,i:0},a}var l=b.documentElement,m=a.setTimeout,n=b.getElementsByTagName("script")[0],o={}.toString,p=[],q=0,r="MozAppearance"in l.style,s=r&&!!b.createRange().compareNode,t=s?l:n.parentNode,l=a.opera&&"[object Opera]"==o.call(a.opera),l=!!b.attachEvent&&!l,u=r?"object":l?"script":"img",v=l?"script":u,w=Array.isArray||function(a){return"[object Array]"==o.call(a)},x=[],y={},z={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}},A,B;B=function(a){function b(a){var a=a.split("!"),b=x.length,c=a.pop(),d=a.length,c={url:c,origUrl:c,prefixes:a},e,f,g;for(f=0;f
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}t.length>0&&($(".wy-menu-vertical .current").removeClass("current"),t.addClass("current"),t.closest("li.toctree-l1").addClass("current"),t.closest("li.toctree-l1").parent().addClass("current"),t.closest("li.toctree-l1").addClass("current"),t.closest("li.toctree-l2").addClass("current"),t.closest("li.toctree-l3").addClass("current"),t.closest("li.toctree-l4").addClass("current"),t.closest("li.toctree-l5").addClass("current"),t[0].scrollIntoView())}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t, it's generated by ReST for `x` */ - font-size: 13px; - font-family: 'Bitstream Vera Sans Mono', 'Monaco', monospace; - font-weight: bold; - font-style: normal; -} - -div.admonition { - margin: 10px 0 10px 0; - padding: 10px; - border: 1px solid #ccc; -} - -div.admonition p.admonition-title { - background-color: #28437f; - color: white; - margin: -10px -10px 10px -10px; - padding: 4px 10px 4px 10px; - font-weight: bold; - font-size: 15px; -} - -div.admonition p.admonition-title a { - color: white!important; -} - -a.headerlink { - color: #B4B4B4!important; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none!important; - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -a.headerlink:hover { - background-color: #B4B4B4; - color: #F0F0F0!important; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.indextable { - width: 100%; -} - -table.genindextable td { - vertical-align: top; - width: 50%; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; - font-size: 11px; -} - -table.indextable ul a { - color: #000; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -table.modindextable { - width: 100%; - border: none; -} - 
-table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -table.modindextable img.toggler { - margin-right: 10px; -} - -dl.function dt, -dl.class dt, -dl.exception dt, -dl.method dt, -dl.attribute dt { - font-weight: normal; -} - -dt .descname { - font-weight: bold; - margin-right: 4px; -} - -dt .sig-paren { - font-size: larger; -} - -dt .descname, dt .descclassname { - padding: 0; - background: transparent; - border-bottom: 1px solid #111; -} - -dt .descclassname { - margin-left: 2px; -} - -dl dt big { - font-size: 100%; -} - -ul.search { - margin: 10px 0 0 30px; - padding: 0; -} - -ul.search li { - margin: 10px 0 0 0; - padding: 0; -} - -ul.search div.context { - font-size: 12px; - padding: 4px 0 0 20px; - color: #888; -} - -span.highlight { - background-color: #eee; - border: 1px solid #ccc; -} - -#toc { - margin: 0 -17px 0 -17px; - display: none; -} - -#toc h3 { - float: right; - margin: 5px 5px 0 0; - padding: 0; - font-size: 12px; - color: #777; -} - -#toc h3:hover { - color: #333; - cursor: pointer; -} - -.expandedtoc { - background: #222 url(darkmetal.png); - border-bottom: 1px solid #111; - outline-bottom: 1px solid #000; - padding: 5px; -} - -.expandedtoc h3 { - color: #aaa; - margin: 0!important; -} - -.expandedtoc h3:hover { - color: white!important; -} - -#tod h3:hover { - color: white; -} - -#toc a { - color: #ddd; - text-decoration: none; -} - -#toc a:hover { - color: white; - text-decoration: underline; -} - -#toc ul { - margin: 5px 0 12px 17px; - padding: 0 7px 0 7px; -} - -#toc ul ul { - margin-bottom: 0; -} - -#toc ul li { - margin: 2px 0 0 0; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: 'Georgia', serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; - margin: -1px -5px; - padding: 0 5px; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* math display */ - -div.math p { - text-align: center; -} - -span.eqno { - float: right; -} \ No newline at end of file diff --git a/doc/build/html/_static/sidebar.js b/doc/build/html/_static/sidebar.js deleted file mode 100644 index 4282fe91..00000000 --- a/doc/build/html/_static/sidebar.js +++ /dev/null @@ -1,159 +0,0 @@ -/* - * sidebar.js - * ~~~~~~~~~~ - * - * This script makes the Sphinx sidebar collapsible. - * - * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds - * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton - * used to collapse and expand the sidebar. - * - * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden - * and the width of the sidebar and the margin-left of the document - * are decreased. When the sidebar is expanded the opposite happens. - * This script saves a per-browser/per-session cookie used to - * remember the position of the sidebar among the pages. - * Once the browser is closed the cookie is deleted and the position - * reset to the default (expanded). - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -$(function() { - - - - - - - - - // global elements used by the functions. 
- // the 'sidebarbutton' element is defined as global after its - // creation, in the add_sidebar_button function - var bodywrapper = $('.bodywrapper'); - var sidebar = $('.sphinxsidebar'); - var sidebarwrapper = $('.sphinxsidebarwrapper'); - - // for some reason, the document has no sidebar; do not run into errors - if (!sidebar.length) return; - - // original margin-left of the bodywrapper and width of the sidebar - // with the sidebar expanded - var bw_margin_expanded = bodywrapper.css('margin-left'); - var ssb_width_expanded = sidebar.width(); - - // margin-left of the bodywrapper and width of the sidebar - // with the sidebar collapsed - var bw_margin_collapsed = '.8em'; - var ssb_width_collapsed = '.8em'; - - // colors used by the current theme - var dark_color = $('.related').css('background-color'); - var light_color = $('.document').css('background-color'); - - function sidebar_is_collapsed() { - return sidebarwrapper.is(':not(:visible)'); - } - - function toggle_sidebar() { - if (sidebar_is_collapsed()) - expand_sidebar(); - else - collapse_sidebar(); - } - - function collapse_sidebar() { - sidebarwrapper.hide(); - sidebar.css('width', ssb_width_collapsed); - bodywrapper.css('margin-left', bw_margin_collapsed); - sidebarbutton.css({ - 'margin-left': '0', - 'height': bodywrapper.height() - }); - sidebarbutton.find('span').text('»'); - sidebarbutton.attr('title', _('Expand sidebar')); - document.cookie = 'sidebar=collapsed'; - } - - function expand_sidebar() { - bodywrapper.css('margin-left', bw_margin_expanded); - sidebar.css('width', ssb_width_expanded); - sidebarwrapper.show(); - sidebarbutton.css({ - 'margin-left': ssb_width_expanded-12, - 'height': bodywrapper.height() - }); - sidebarbutton.find('span').text('«'); - sidebarbutton.attr('title', _('Collapse sidebar')); - document.cookie = 'sidebar=expanded'; - } - - function add_sidebar_button() { - sidebarwrapper.css({ - 'float': 'left', - 'margin-right': '0', - 'width': ssb_width_expanded - 28 - }); - // create the button - sidebar.append( - '
«
' - ); - var sidebarbutton = $('#sidebarbutton'); - light_color = sidebarbutton.css('background-color'); - // find the height of the viewport to center the '<<' in the page - var viewport_height; - if (window.innerHeight) - viewport_height = window.innerHeight; - else - viewport_height = $(window).height(); - sidebarbutton.find('span').css({ - 'display': 'block', - 'margin-top': (viewport_height - sidebar.position().top - 20) / 2 - }); - - sidebarbutton.click(toggle_sidebar); - sidebarbutton.attr('title', _('Collapse sidebar')); - sidebarbutton.css({ - 'color': '#FFFFFF', - 'border-left': '1px solid ' + dark_color, - 'font-size': '1.2em', - 'cursor': 'pointer', - 'height': bodywrapper.height(), - 'padding-top': '1px', - 'margin-left': ssb_width_expanded - 12 - }); - - sidebarbutton.hover( - function () { - $(this).css('background-color', dark_color); - }, - function () { - $(this).css('background-color', light_color); - } - ); - } - - function set_position_from_cookie() { - if (!document.cookie) - return; - var items = document.cookie.split(';'); - for(var k=0; k { + const allTabs = document.querySelectorAll('.sphinx-tabs-tab'); + const tabLists = document.querySelectorAll('[role="tablist"]'); + + allTabs.forEach(tab => { + tab.addEventListener("click", changeTabs); + }); + + tabLists.forEach(tabList => { + tabList.addEventListener("keydown", keyTabs); + }); + + // Restore group tab selection from session + const lastSelected = session.getItem('sphinx-tabs-last-selected'); + if (lastSelected != null) selectNamedTabs(lastSelected); +}); + +/** + * Key focus left and right between sibling elements using arrows + * @param {Node} e the element in focus when key was pressed + */ +function keyTabs(e) { + const tab = e.target; + let nextTab = null; + if (e.keyCode === 39 || e.keyCode === 37) { + tab.setAttribute("tabindex", -1); + // Move right + if (e.keyCode === 39) { + nextTab = tab.nextElementSibling; + if (nextTab === null) { + nextTab = tab.parentNode.firstElementChild; + } + // Move left + } else if (e.keyCode === 37) { + nextTab = tab.previousElementSibling; + if (nextTab === null) { + nextTab = tab.parentNode.lastElementChild; + } + } + } + + if (nextTab !== null) { + nextTab.setAttribute("tabindex", 0); + nextTab.focus(); + } +} + +/** + * Select or deselect clicked tab. If a group tab + * is selected, also select tab in other tabLists. + * @param {Node} e the element that was clicked + */ +function changeTabs(e) { + // Use this instead of the element that was clicked, in case it's a child + const notSelected = this.getAttribute("aria-selected") === "false"; + const positionBefore = this.parentNode.getBoundingClientRect().top; + const notClosable = !this.parentNode.classList.contains("closeable"); + + deselectTabList(this); + + if (notSelected || notClosable) { + selectTab(this); + const name = this.getAttribute("name"); + selectNamedTabs(name, this.id); + + if (this.classList.contains("group-tab")) { + // Persist during session + session.setItem('sphinx-tabs-last-selected', name); + } + } + + const positionAfter = this.parentNode.getBoundingClientRect().top; + const positionDelta = positionAfter - positionBefore; + // Scroll to offset content resizing + window.scrollTo(0, window.scrollY + positionDelta); +} + +/** + * Select tab and show associated panel. 
+ * @param {Node} tab tab to select + */ +function selectTab(tab) { + tab.setAttribute("aria-selected", true); + + // Show the associated panel + document + .getElementById(tab.getAttribute("aria-controls")) + .removeAttribute("hidden"); +} + +/** + * Hide the panels associated with all tabs within the + * tablist containing this tab. + * @param {Node} tab a tab within the tablist to deselect + */ +function deselectTabList(tab) { + const parent = tab.parentNode; + const grandparent = parent.parentNode; + + Array.from(parent.children) + .forEach(t => t.setAttribute("aria-selected", false)); + + Array.from(grandparent.children) + .slice(1) // Skip tablist + .forEach(panel => panel.setAttribute("hidden", true)); +} + +/** + * Select grouped tabs with the same name, but no the tab + * with the given id. + * @param {Node} name name of grouped tab to be selected + * @param {Node} clickedId id of clicked tab + */ +function selectNamedTabs(name, clickedId=null) { + const groupedTabs = document.querySelectorAll(`.sphinx-tabs-tab[name="${name}"]`); + const tabLists = Array.from(groupedTabs).map(tab => tab.parentNode); + + tabLists + .forEach(tabList => { + // Don't want to change the tabList containing the clicked tab + const clickedTab = tabList.querySelector(`[id="${clickedId}"]`); + if (clickedTab === null ) { + // Select first tab with matching name + const tab = tabList.querySelector(`.sphinx-tabs-tab[name="${name}"]`); + deselectTabList(tab); + selectTab(tab); + } + }) +} + +exports.keyTabs = keyTabs; +exports.changeTabs = changeTabs; +exports.selectTab = selectTab; +exports.deselectTabList = deselectTabList; +exports.selectNamedTabs = selectNamedTabs; diff --git a/doc/build/html/_static/theme_extras.js b/doc/build/html/_static/theme_extras.js deleted file mode 100644 index 1c042187..00000000 --- a/doc/build/html/_static/theme_extras.js +++ /dev/null @@ -1,26 +0,0 @@ -$(function() { - - var - toc = $('#toc').show(), - items = $('#toc > ul').hide(); - - $('#toc h3') - .click(function() { - if (items.is(':visible')) { - items.animate({ - height: 'hide', - opacity: 'hide' - }, 300, function() { - toc.removeClass('expandedtoc'); - }); - } - else { - items.animate({ - height: 'show', - opacity: 'show' - }, 400); - toc.addClass('expandedtoc'); - } - }); - -}); diff --git a/doc/build/html/_static/transparent.gif b/doc/build/html/_static/transparent.gif deleted file mode 100644 index 0341802e..00000000 Binary files a/doc/build/html/_static/transparent.gif and /dev/null differ diff --git a/doc/build/html/_static/underscore-1.13.1.js b/doc/build/html/_static/underscore-1.13.1.js new file mode 100644 index 00000000..ffd77af9 --- /dev/null +++ b/doc/build/html/_static/underscore-1.13.1.js @@ -0,0 +1,2042 @@ +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : + typeof define === 'function' && define.amd ? define('underscore', factory) : + (global = typeof globalThis !== 'undefined' ? globalThis : global || self, (function () { + var current = global._; + var exports = global._ = factory(); + exports.noConflict = function () { global._ = current; return exports; }; + }())); +}(this, (function () { + // Underscore.js 1.13.1 + // https://underscorejs.org + // (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors + // Underscore may be freely distributed under the MIT license. + + // Current version. 
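The new `tabs.js` added above drives a standard WAI-ARIA tab pattern: the clicked tab gets `aria-selected="true"`, every sibling panel is hidden, and the panel named by `aria-controls` is revealed again. A minimal, self-contained sketch of that same select/deselect cycle (the markup, class names and ids here are illustrative only, not taken from sphinx-tabs):

```
// Illustrative markup and behaviour only; sphinx-tabs generates its own HTML.
document.body.innerHTML = `
  <div role="tablist">
    <button class="demo-tab" id="tab-a" aria-selected="true"  aria-controls="panel-a">A</button>
    <button class="demo-tab" id="tab-b" aria-selected="false" aria-controls="panel-b">B</button>
  </div>
  <div id="panel-a" role="tabpanel">Panel A</div>
  <div id="panel-b" role="tabpanel" hidden>Panel B</div>`;

document.querySelectorAll('.demo-tab').forEach(tab => {
  tab.addEventListener('click', () => {
    // Deselect every tab and hide every panel, then select the clicked tab
    // and show its panel: the same cycle as deselectTabList() followed by selectTab().
    document.querySelectorAll('.demo-tab').forEach(t => t.setAttribute('aria-selected', 'false'));
    document.querySelectorAll('[role="tabpanel"]').forEach(p => p.setAttribute('hidden', ''));
    tab.setAttribute('aria-selected', 'true');
    document.getElementById(tab.getAttribute('aria-controls')).removeAttribute('hidden');
  });
});
```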
+ var VERSION = '1.13.1'; + + // Establish the root object, `window` (`self`) in the browser, `global` + // on the server, or `this` in some virtual machines. We use `self` + // instead of `window` for `WebWorker` support. + var root = typeof self == 'object' && self.self === self && self || + typeof global == 'object' && global.global === global && global || + Function('return this')() || + {}; + + // Save bytes in the minified (but not gzipped) version: + var ArrayProto = Array.prototype, ObjProto = Object.prototype; + var SymbolProto = typeof Symbol !== 'undefined' ? Symbol.prototype : null; + + // Create quick reference variables for speed access to core prototypes. + var push = ArrayProto.push, + slice = ArrayProto.slice, + toString = ObjProto.toString, + hasOwnProperty = ObjProto.hasOwnProperty; + + // Modern feature detection. + var supportsArrayBuffer = typeof ArrayBuffer !== 'undefined', + supportsDataView = typeof DataView !== 'undefined'; + + // All **ECMAScript 5+** native function implementations that we hope to use + // are declared here. + var nativeIsArray = Array.isArray, + nativeKeys = Object.keys, + nativeCreate = Object.create, + nativeIsView = supportsArrayBuffer && ArrayBuffer.isView; + + // Create references to these builtin functions because we override them. + var _isNaN = isNaN, + _isFinite = isFinite; + + // Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed. + var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString'); + var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString', + 'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString']; + + // The largest integer that can be represented exactly. + var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1; + + // Some functions take a variable number of arguments, or a few expected + // arguments at the beginning and then a variable number of values to operate + // on. This helper accumulates all remaining arguments past the function’s + // argument length (or an explicit `startIndex`), into an array that becomes + // the last argument. Similar to ES6’s "rest parameter". + function restArguments(func, startIndex) { + startIndex = startIndex == null ? func.length - 1 : +startIndex; + return function() { + var length = Math.max(arguments.length - startIndex, 0), + rest = Array(length), + index = 0; + for (; index < length; index++) { + rest[index] = arguments[index + startIndex]; + } + switch (startIndex) { + case 0: return func.call(this, rest); + case 1: return func.call(this, arguments[0], rest); + case 2: return func.call(this, arguments[0], arguments[1], rest); + } + var args = Array(startIndex + 1); + for (index = 0; index < startIndex; index++) { + args[index] = arguments[index]; + } + args[startIndex] = rest; + return func.apply(this, args); + }; + } + + // Is a given variable an object? + function isObject(obj) { + var type = typeof obj; + return type === 'function' || type === 'object' && !!obj; + } + + // Is a given value equal to null? + function isNull(obj) { + return obj === null; + } + + // Is a given variable undefined? + function isUndefined(obj) { + return obj === void 0; + } + + // Is a given value a boolean? + function isBoolean(obj) { + return obj === true || obj === false || toString.call(obj) === '[object Boolean]'; + } + + // Is a given value a DOM element? + function isElement(obj) { + return !!(obj && obj.nodeType === 1); + } + + // Internal function for creating a `toString`-based type tester. 
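The `restArguments` helper defined above is also exposed publicly as `_.restArguments`. A quick sketch of the rest-parameter emulation it provides (assumes Underscore is available, e.g. via `require('underscore')`):

```
var _ = require('underscore');

// Arguments beyond the function's declared parameters are collected
// into the final `rest` array, much like an ES6 rest parameter.
var firstAndRest = _.restArguments(function (first, rest) {
  return [first, rest];
});

console.log(firstAndRest(1, 2, 3, 4)); // [1, [2, 3, 4]]
```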
+ function tagTester(name) { + var tag = '[object ' + name + ']'; + return function(obj) { + return toString.call(obj) === tag; + }; + } + + var isString = tagTester('String'); + + var isNumber = tagTester('Number'); + + var isDate = tagTester('Date'); + + var isRegExp = tagTester('RegExp'); + + var isError = tagTester('Error'); + + var isSymbol = tagTester('Symbol'); + + var isArrayBuffer = tagTester('ArrayBuffer'); + + var isFunction = tagTester('Function'); + + // Optimize `isFunction` if appropriate. Work around some `typeof` bugs in old + // v8, IE 11 (#1621), Safari 8 (#1929), and PhantomJS (#2236). + var nodelist = root.document && root.document.childNodes; + if (typeof /./ != 'function' && typeof Int8Array != 'object' && typeof nodelist != 'function') { + isFunction = function(obj) { + return typeof obj == 'function' || false; + }; + } + + var isFunction$1 = isFunction; + + var hasObjectTag = tagTester('Object'); + + // In IE 10 - Edge 13, `DataView` has string tag `'[object Object]'`. + // In IE 11, the most common among them, this problem also applies to + // `Map`, `WeakMap` and `Set`. + var hasStringTagBug = ( + supportsDataView && hasObjectTag(new DataView(new ArrayBuffer(8))) + ), + isIE11 = (typeof Map !== 'undefined' && hasObjectTag(new Map)); + + var isDataView = tagTester('DataView'); + + // In IE 10 - Edge 13, we need a different heuristic + // to determine whether an object is a `DataView`. + function ie10IsDataView(obj) { + return obj != null && isFunction$1(obj.getInt8) && isArrayBuffer(obj.buffer); + } + + var isDataView$1 = (hasStringTagBug ? ie10IsDataView : isDataView); + + // Is a given value an array? + // Delegates to ECMA5's native `Array.isArray`. + var isArray = nativeIsArray || tagTester('Array'); + + // Internal function to check whether `key` is an own property name of `obj`. + function has$1(obj, key) { + return obj != null && hasOwnProperty.call(obj, key); + } + + var isArguments = tagTester('Arguments'); + + // Define a fallback version of the method in browsers (ahem, IE < 9), where + // there isn't any inspectable "Arguments" type. + (function() { + if (!isArguments(arguments)) { + isArguments = function(obj) { + return has$1(obj, 'callee'); + }; + } + }()); + + var isArguments$1 = isArguments; + + // Is a given object a finite number? + function isFinite$1(obj) { + return !isSymbol(obj) && _isFinite(obj) && !isNaN(parseFloat(obj)); + } + + // Is the given value `NaN`? + function isNaN$1(obj) { + return isNumber(obj) && _isNaN(obj); + } + + // Predicate-generating function. Often useful outside of Underscore. + function constant(value) { + return function() { + return value; + }; + } + + // Common internal logic for `isArrayLike` and `isBufferLike`. + function createSizePropertyCheck(getSizeProperty) { + return function(collection) { + var sizeProperty = getSizeProperty(collection); + return typeof sizeProperty == 'number' && sizeProperty >= 0 && sizeProperty <= MAX_ARRAY_INDEX; + } + } + + // Internal helper to generate a function to obtain property `key` from `obj`. + function shallowProperty(key) { + return function(obj) { + return obj == null ? void 0 : obj[key]; + }; + } + + // Internal helper to obtain the `byteLength` property of an object. + var getByteLength = shallowProperty('byteLength'); + + // Internal helper to determine whether we should spend extensive checks against + // `ArrayBuffer` et al. + var isBufferLike = createSizePropertyCheck(getByteLength); + + // Is a given value a typed array? 
+ var typedArrayPattern = /\[object ((I|Ui)nt(8|16|32)|Float(32|64)|Uint8Clamped|Big(I|Ui)nt64)Array\]/; + function isTypedArray(obj) { + // `ArrayBuffer.isView` is the most future-proof, so use it when available. + // Otherwise, fall back on the above regular expression. + return nativeIsView ? (nativeIsView(obj) && !isDataView$1(obj)) : + isBufferLike(obj) && typedArrayPattern.test(toString.call(obj)); + } + + var isTypedArray$1 = supportsArrayBuffer ? isTypedArray : constant(false); + + // Internal helper to obtain the `length` property of an object. + var getLength = shallowProperty('length'); + + // Internal helper to create a simple lookup structure. + // `collectNonEnumProps` used to depend on `_.contains`, but this led to + // circular imports. `emulatedSet` is a one-off solution that only works for + // arrays of strings. + function emulatedSet(keys) { + var hash = {}; + for (var l = keys.length, i = 0; i < l; ++i) hash[keys[i]] = true; + return { + contains: function(key) { return hash[key]; }, + push: function(key) { + hash[key] = true; + return keys.push(key); + } + }; + } + + // Internal helper. Checks `keys` for the presence of keys in IE < 9 that won't + // be iterated by `for key in ...` and thus missed. Extends `keys` in place if + // needed. + function collectNonEnumProps(obj, keys) { + keys = emulatedSet(keys); + var nonEnumIdx = nonEnumerableProps.length; + var constructor = obj.constructor; + var proto = isFunction$1(constructor) && constructor.prototype || ObjProto; + + // Constructor is a special case. + var prop = 'constructor'; + if (has$1(obj, prop) && !keys.contains(prop)) keys.push(prop); + + while (nonEnumIdx--) { + prop = nonEnumerableProps[nonEnumIdx]; + if (prop in obj && obj[prop] !== proto[prop] && !keys.contains(prop)) { + keys.push(prop); + } + } + } + + // Retrieve the names of an object's own properties. + // Delegates to **ECMAScript 5**'s native `Object.keys`. + function keys(obj) { + if (!isObject(obj)) return []; + if (nativeKeys) return nativeKeys(obj); + var keys = []; + for (var key in obj) if (has$1(obj, key)) keys.push(key); + // Ahem, IE < 9. + if (hasEnumBug) collectNonEnumProps(obj, keys); + return keys; + } + + // Is a given array, string, or object empty? + // An "empty" object has no enumerable own-properties. + function isEmpty(obj) { + if (obj == null) return true; + // Skip the more expensive `toString`-based type checks if `obj` has no + // `.length`. + var length = getLength(obj); + if (typeof length == 'number' && ( + isArray(obj) || isString(obj) || isArguments$1(obj) + )) return length === 0; + return getLength(keys(obj)) === 0; + } + + // Returns whether an object has a given set of `key:value` pairs. + function isMatch(object, attrs) { + var _keys = keys(attrs), length = _keys.length; + if (object == null) return !length; + var obj = Object(object); + for (var i = 0; i < length; i++) { + var key = _keys[i]; + if (attrs[key] !== obj[key] || !(key in obj)) return false; + } + return true; + } + + // If Underscore is called as a function, it returns a wrapped object that can + // be used OO-style. This wrapper holds altered versions of all functions added + // through `_.mixin`. Wrapped objects may be chained. + function _$1(obj) { + if (obj instanceof _$1) return obj; + if (!(this instanceof _$1)) return new _$1(obj); + this._wrapped = obj; + } + + _$1.VERSION = VERSION; + + // Extracts the result from a wrapped and chained object. 
+ _$1.prototype.value = function() { + return this._wrapped; + }; + + // Provide unwrapping proxies for some methods used in engine operations + // such as arithmetic and JSON stringification. + _$1.prototype.valueOf = _$1.prototype.toJSON = _$1.prototype.value; + + _$1.prototype.toString = function() { + return String(this._wrapped); + }; + + // Internal function to wrap or shallow-copy an ArrayBuffer, + // typed array or DataView to a new view, reusing the buffer. + function toBufferView(bufferSource) { + return new Uint8Array( + bufferSource.buffer || bufferSource, + bufferSource.byteOffset || 0, + getByteLength(bufferSource) + ); + } + + // We use this string twice, so give it a name for minification. + var tagDataView = '[object DataView]'; + + // Internal recursive comparison function for `_.isEqual`. + function eq(a, b, aStack, bStack) { + // Identical objects are equal. `0 === -0`, but they aren't identical. + // See the [Harmony `egal` proposal](https://wiki.ecmascript.org/doku.php?id=harmony:egal). + if (a === b) return a !== 0 || 1 / a === 1 / b; + // `null` or `undefined` only equal to itself (strict comparison). + if (a == null || b == null) return false; + // `NaN`s are equivalent, but non-reflexive. + if (a !== a) return b !== b; + // Exhaust primitive checks + var type = typeof a; + if (type !== 'function' && type !== 'object' && typeof b != 'object') return false; + return deepEq(a, b, aStack, bStack); + } + + // Internal recursive comparison function for `_.isEqual`. + function deepEq(a, b, aStack, bStack) { + // Unwrap any wrapped objects. + if (a instanceof _$1) a = a._wrapped; + if (b instanceof _$1) b = b._wrapped; + // Compare `[[Class]]` names. + var className = toString.call(a); + if (className !== toString.call(b)) return false; + // Work around a bug in IE 10 - Edge 13. + if (hasStringTagBug && className == '[object Object]' && isDataView$1(a)) { + if (!isDataView$1(b)) return false; + className = tagDataView; + } + switch (className) { + // These types are compared by value. + case '[object RegExp]': + // RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i') + case '[object String]': + // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is + // equivalent to `new String("5")`. + return '' + a === '' + b; + case '[object Number]': + // `NaN`s are equivalent, but non-reflexive. + // Object(NaN) is equivalent to NaN. + if (+a !== +a) return +b !== +b; + // An `egal` comparison is performed for other numeric values. + return +a === 0 ? 1 / +a === 1 / b : +a === +b; + case '[object Date]': + case '[object Boolean]': + // Coerce dates and booleans to numeric primitive values. Dates are compared by their + // millisecond representations. Note that invalid dates with millisecond representations + // of `NaN` are not equivalent. + return +a === +b; + case '[object Symbol]': + return SymbolProto.valueOf.call(a) === SymbolProto.valueOf.call(b); + case '[object ArrayBuffer]': + case tagDataView: + // Coerce to typed array so we can fall through. 
+ return deepEq(toBufferView(a), toBufferView(b), aStack, bStack); + } + + var areArrays = className === '[object Array]'; + if (!areArrays && isTypedArray$1(a)) { + var byteLength = getByteLength(a); + if (byteLength !== getByteLength(b)) return false; + if (a.buffer === b.buffer && a.byteOffset === b.byteOffset) return true; + areArrays = true; + } + if (!areArrays) { + if (typeof a != 'object' || typeof b != 'object') return false; + + // Objects with different constructors are not equivalent, but `Object`s or `Array`s + // from different frames are. + var aCtor = a.constructor, bCtor = b.constructor; + if (aCtor !== bCtor && !(isFunction$1(aCtor) && aCtor instanceof aCtor && + isFunction$1(bCtor) && bCtor instanceof bCtor) + && ('constructor' in a && 'constructor' in b)) { + return false; + } + } + // Assume equality for cyclic structures. The algorithm for detecting cyclic + // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`. + + // Initializing stack of traversed objects. + // It's done here since we only need them for objects and arrays comparison. + aStack = aStack || []; + bStack = bStack || []; + var length = aStack.length; + while (length--) { + // Linear search. Performance is inversely proportional to the number of + // unique nested structures. + if (aStack[length] === a) return bStack[length] === b; + } + + // Add the first object to the stack of traversed objects. + aStack.push(a); + bStack.push(b); + + // Recursively compare objects and arrays. + if (areArrays) { + // Compare array lengths to determine if a deep comparison is necessary. + length = a.length; + if (length !== b.length) return false; + // Deep compare the contents, ignoring non-numeric properties. + while (length--) { + if (!eq(a[length], b[length], aStack, bStack)) return false; + } + } else { + // Deep compare objects. + var _keys = keys(a), key; + length = _keys.length; + // Ensure that both objects contain the same number of properties before comparing deep equality. + if (keys(b).length !== length) return false; + while (length--) { + // Deep compare each member + key = _keys[length]; + if (!(has$1(b, key) && eq(a[key], b[key], aStack, bStack))) return false; + } + } + // Remove the first object from the stack of traversed objects. + aStack.pop(); + bStack.pop(); + return true; + } + + // Perform a deep comparison to check if two objects are equal. + function isEqual(a, b) { + return eq(a, b); + } + + // Retrieve all the enumerable property names of an object. + function allKeys(obj) { + if (!isObject(obj)) return []; + var keys = []; + for (var key in obj) keys.push(key); + // Ahem, IE < 9. + if (hasEnumBug) collectNonEnumProps(obj, keys); + return keys; + } + + // Since the regular `Object.prototype.toString` type tests don't work for + // some types in IE 11, we use a fingerprinting heuristic instead, based + // on the methods. It's not great, but it's the best we got. + // The fingerprint method lists are defined below. + function ie11fingerprint(methods) { + var length = getLength(methods); + return function(obj) { + if (obj == null) return false; + // `Map`, `WeakMap` and `Set` have no enumerable keys. + var keys = allKeys(obj); + if (getLength(keys)) return false; + for (var i = 0; i < length; i++) { + if (!isFunction$1(obj[methods[i]])) return false; + } + // If we are testing against `WeakMap`, we need to ensure that + // `obj` doesn't have a `forEach` method in order to distinguish + // it from a regular `Map`. 
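The `eq`/`deepEq` machinery above is what the public `_.isEqual` calls into. A few small examples of the semantics its comments describe, namely structural equality, `NaN` equal to itself, and `0` distinguished from `-0` (assumes Underscore is loaded):

```
var _ = require('underscore');

// Structural comparison rather than reference identity.
console.log(_.isEqual({a: [1, 2, {b: 3}]}, {a: [1, 2, {b: 3}]})); // true
console.log({a: 1} === {a: 1});                                   // false

// "egal"-style edge cases handled by eq() above.
console.log(_.isEqual(NaN, NaN)); // true
console.log(_.isEqual(0, -0));    // false
```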
+ return methods !== weakMapMethods || !isFunction$1(obj[forEachName]); + }; + } + + // In the interest of compact minification, we write + // each string in the fingerprints only once. + var forEachName = 'forEach', + hasName = 'has', + commonInit = ['clear', 'delete'], + mapTail = ['get', hasName, 'set']; + + // `Map`, `WeakMap` and `Set` each have slightly different + // combinations of the above sublists. + var mapMethods = commonInit.concat(forEachName, mapTail), + weakMapMethods = commonInit.concat(mapTail), + setMethods = ['add'].concat(commonInit, forEachName, hasName); + + var isMap = isIE11 ? ie11fingerprint(mapMethods) : tagTester('Map'); + + var isWeakMap = isIE11 ? ie11fingerprint(weakMapMethods) : tagTester('WeakMap'); + + var isSet = isIE11 ? ie11fingerprint(setMethods) : tagTester('Set'); + + var isWeakSet = tagTester('WeakSet'); + + // Retrieve the values of an object's properties. + function values(obj) { + var _keys = keys(obj); + var length = _keys.length; + var values = Array(length); + for (var i = 0; i < length; i++) { + values[i] = obj[_keys[i]]; + } + return values; + } + + // Convert an object into a list of `[key, value]` pairs. + // The opposite of `_.object` with one argument. + function pairs(obj) { + var _keys = keys(obj); + var length = _keys.length; + var pairs = Array(length); + for (var i = 0; i < length; i++) { + pairs[i] = [_keys[i], obj[_keys[i]]]; + } + return pairs; + } + + // Invert the keys and values of an object. The values must be serializable. + function invert(obj) { + var result = {}; + var _keys = keys(obj); + for (var i = 0, length = _keys.length; i < length; i++) { + result[obj[_keys[i]]] = _keys[i]; + } + return result; + } + + // Return a sorted list of the function names available on the object. + function functions(obj) { + var names = []; + for (var key in obj) { + if (isFunction$1(obj[key])) names.push(key); + } + return names.sort(); + } + + // An internal function for creating assigner functions. + function createAssigner(keysFunc, defaults) { + return function(obj) { + var length = arguments.length; + if (defaults) obj = Object(obj); + if (length < 2 || obj == null) return obj; + for (var index = 1; index < length; index++) { + var source = arguments[index], + keys = keysFunc(source), + l = keys.length; + for (var i = 0; i < l; i++) { + var key = keys[i]; + if (!defaults || obj[key] === void 0) obj[key] = source[key]; + } + } + return obj; + }; + } + + // Extend a given object with all the properties in passed-in object(s). + var extend = createAssigner(allKeys); + + // Assigns a given object with all the own properties in the passed-in + // object(s). + // (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign) + var extendOwn = createAssigner(keys); + + // Fill in a given object with default properties. + var defaults = createAssigner(allKeys, true); + + // Create a naked function reference for surrogate-prototype-swapping. + function ctor() { + return function(){}; + } + + // An internal function for creating a new object that inherits from another. + function baseCreate(prototype) { + if (!isObject(prototype)) return {}; + if (nativeCreate) return nativeCreate(prototype); + var Ctor = ctor(); + Ctor.prototype = prototype; + var result = new Ctor; + Ctor.prototype = null; + return result; + } + + // Creates an object that inherits from the given prototype object. + // If additional properties are provided then they will be added to the + // created object. 
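`createAssigner` above is used to build both `_.extend` (later sources overwrite earlier keys) and `_.defaults` (only missing keys are filled in). For example:

```
var _ = require('underscore');

// extend: later sources overwrite earlier keys.
console.log(_.extend({}, {retries: 3}, {retries: 5, timeout: 100}));
// { retries: 5, timeout: 100 }

// defaults: only keys still undefined on the target are filled in.
console.log(_.defaults({retries: 3}, {retries: 5, timeout: 100}));
// { retries: 3, timeout: 100 }
```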
+ function create(prototype, props) { + var result = baseCreate(prototype); + if (props) extendOwn(result, props); + return result; + } + + // Create a (shallow-cloned) duplicate of an object. + function clone(obj) { + if (!isObject(obj)) return obj; + return isArray(obj) ? obj.slice() : extend({}, obj); + } + + // Invokes `interceptor` with the `obj` and then returns `obj`. + // The primary purpose of this method is to "tap into" a method chain, in + // order to perform operations on intermediate results within the chain. + function tap(obj, interceptor) { + interceptor(obj); + return obj; + } + + // Normalize a (deep) property `path` to array. + // Like `_.iteratee`, this function can be customized. + function toPath$1(path) { + return isArray(path) ? path : [path]; + } + _$1.toPath = toPath$1; + + // Internal wrapper for `_.toPath` to enable minification. + // Similar to `cb` for `_.iteratee`. + function toPath(path) { + return _$1.toPath(path); + } + + // Internal function to obtain a nested property in `obj` along `path`. + function deepGet(obj, path) { + var length = path.length; + for (var i = 0; i < length; i++) { + if (obj == null) return void 0; + obj = obj[path[i]]; + } + return length ? obj : void 0; + } + + // Get the value of the (deep) property on `path` from `object`. + // If any property in `path` does not exist or if the value is + // `undefined`, return `defaultValue` instead. + // The `path` is normalized through `_.toPath`. + function get(object, path, defaultValue) { + var value = deepGet(object, toPath(path)); + return isUndefined(value) ? defaultValue : value; + } + + // Shortcut function for checking if an object has a given property directly on + // itself (in other words, not on a prototype). Unlike the internal `has` + // function, this public version can also traverse nested properties. + function has(obj, path) { + path = toPath(path); + var length = path.length; + for (var i = 0; i < length; i++) { + var key = path[i]; + if (!has$1(obj, key)) return false; + obj = obj[key]; + } + return !!length; + } + + // Keep the identity function around for default iteratees. + function identity(value) { + return value; + } + + // Returns a predicate for checking whether an object has a given set of + // `key:value` pairs. + function matcher(attrs) { + attrs = extendOwn({}, attrs); + return function(obj) { + return isMatch(obj, attrs); + }; + } + + // Creates a function that, when passed an object, will traverse that object’s + // properties down the given `path`, specified as an array of keys or indices. + function property(path) { + path = toPath(path); + return function(obj) { + return deepGet(obj, path); + }; + } + + // Internal function that returns an efficient (for current engines) version + // of the passed-in callback, to be repeatedly applied in other Underscore + // functions. + function optimizeCb(func, context, argCount) { + if (context === void 0) return func; + switch (argCount == null ? 3 : argCount) { + case 1: return function(value) { + return func.call(context, value); + }; + // The 2-argument case is omitted because we’re not using it. 
+ case 3: return function(value, index, collection) { + return func.call(context, value, index, collection); + }; + case 4: return function(accumulator, value, index, collection) { + return func.call(context, accumulator, value, index, collection); + }; + } + return function() { + return func.apply(context, arguments); + }; + } + + // An internal function to generate callbacks that can be applied to each + // element in a collection, returning the desired result — either `_.identity`, + // an arbitrary callback, a property matcher, or a property accessor. + function baseIteratee(value, context, argCount) { + if (value == null) return identity; + if (isFunction$1(value)) return optimizeCb(value, context, argCount); + if (isObject(value) && !isArray(value)) return matcher(value); + return property(value); + } + + // External wrapper for our callback generator. Users may customize + // `_.iteratee` if they want additional predicate/iteratee shorthand styles. + // This abstraction hides the internal-only `argCount` argument. + function iteratee(value, context) { + return baseIteratee(value, context, Infinity); + } + _$1.iteratee = iteratee; + + // The function we call internally to generate a callback. It invokes + // `_.iteratee` if overridden, otherwise `baseIteratee`. + function cb(value, context, argCount) { + if (_$1.iteratee !== iteratee) return _$1.iteratee(value, context); + return baseIteratee(value, context, argCount); + } + + // Returns the results of applying the `iteratee` to each element of `obj`. + // In contrast to `_.map` it returns an object. + function mapObject(obj, iteratee, context) { + iteratee = cb(iteratee, context); + var _keys = keys(obj), + length = _keys.length, + results = {}; + for (var index = 0; index < length; index++) { + var currentKey = _keys[index]; + results[currentKey] = iteratee(obj[currentKey], currentKey, obj); + } + return results; + } + + // Predicate-generating function. Often useful outside of Underscore. + function noop(){} + + // Generates a function for a given object that returns a given property. + function propertyOf(obj) { + if (obj == null) return noop; + return function(path) { + return get(obj, path); + }; + } + + // Run a function **n** times. + function times(n, iteratee, context) { + var accum = Array(Math.max(0, n)); + iteratee = optimizeCb(iteratee, context, 1); + for (var i = 0; i < n; i++) accum[i] = iteratee(i); + return accum; + } + + // Return a random integer between `min` and `max` (inclusive). + function random(min, max) { + if (max == null) { + max = min; + min = 0; + } + return min + Math.floor(Math.random() * (max - min + 1)); + } + + // A (possibly faster) way to get the current timestamp as an integer. + var now = Date.now || function() { + return new Date().getTime(); + }; + + // Internal helper to generate functions for escaping and unescaping strings + // to/from HTML interpolation. + function createEscaper(map) { + var escaper = function(match) { + return map[match]; + }; + // Regexes for identifying a key that needs to be escaped. + var source = '(?:' + keys(map).join('|') + ')'; + var testRegexp = RegExp(source); + var replaceRegexp = RegExp(source, 'g'); + return function(string) { + string = string == null ? '' : '' + string; + return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string; + }; + } + + // Internal list of HTML entities for escaping. 
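The `cb`/`baseIteratee` logic above is what gives the collection functions their iteratee shorthands: a plain function, a property name, or a `key:value` matcher. A small illustration (assumes Underscore is loaded; `_.map` and `_.filter` appear later in this file):

```
var _ = require('underscore');

var people = [{name: 'Ada', admin: true}, {name: 'Bob', admin: false}];

console.log(_.map(people, function (p) { return p.name; })); // ['Ada', 'Bob']
console.log(_.map(people, 'name'));                          // ['Ada', 'Bob']  (property shorthand)
console.log(_.filter(people, {admin: true}));                // [{ name: 'Ada', admin: true }]  (matcher shorthand)
```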
+  var escapeMap = {
+    '&': '&amp;',
+    '<': '&lt;',
+    '>': '&gt;',
+    '"': '&quot;',
+    "'": '&#x27;',
+    '`': '&#x60;'
+  };
+
+  // Function for escaping strings to HTML interpolation.
+  var _escape = createEscaper(escapeMap);
+
+  // Internal list of HTML entities for unescaping.
+  var unescapeMap = invert(escapeMap);
+
+  // Function for unescaping strings from HTML interpolation.
+  var _unescape = createEscaper(unescapeMap);
+
+  // By default, Underscore uses ERB-style template delimiters. Change the
+  // following template settings to use alternative delimiters.
+  var templateSettings = _$1.templateSettings = {
+    evaluate: /<%([\s\S]+?)%>/g,
+    interpolate: /<%=([\s\S]+?)%>/g,
+    escape: /<%-([\s\S]+?)%>/g
+  };
+
+  // When customizing `_.templateSettings`, if you don't want to define an
+  // interpolation, evaluation or escaping regex, we need one that is
+  // guaranteed not to match.
+  var noMatch = /(.)^/;
+
+  // Certain characters need to be escaped so that they can be put into a
+  // string literal.
+  var escapes = {
+    "'": "'",
+    '\\': '\\',
+    '\r': 'r',
+    '\n': 'n',
+    '\u2028': 'u2028',
+    '\u2029': 'u2029'
+  };
+
+  var escapeRegExp = /\\|'|\r|\n|\u2028|\u2029/g;
+
+  function escapeChar(match) {
+    return '\\' + escapes[match];
+  }
+
+  // In order to prevent third-party code injection through
+  // `_.templateSettings.variable`, we test it against the following regular
+  // expression. It is intentionally a bit more liberal than just matching valid
+  // identifiers, but still prevents possible loopholes through defaults or
+  // destructuring assignment.
+  var bareIdentifier = /^\s*(\w|\$)+\s*$/;
+
+  // JavaScript micro-templating, similar to John Resig's implementation.
+  // Underscore templating handles arbitrary delimiters, preserves whitespace,
+  // and correctly escapes quotes within interpolated code.
+  // NB: `oldSettings` only exists for backwards compatibility.
+  function template(text, settings, oldSettings) {
+    if (!settings && oldSettings) settings = oldSettings;
+    settings = defaults({}, settings, _$1.templateSettings);
+
+    // Combine delimiters into one regular expression via alternation.
+    var matcher = RegExp([
+      (settings.escape || noMatch).source,
+      (settings.interpolate || noMatch).source,
+      (settings.evaluate || noMatch).source
+    ].join('|') + '|$', 'g');
+
+    // Compile the template source, escaping string literals appropriately.
+    var index = 0;
+    var source = "__p+='";
+    text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
+      source += text.slice(index, offset).replace(escapeRegExp, escapeChar);
+      index = offset + match.length;
+
+      if (escape) {
+        source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
+      } else if (interpolate) {
+        source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
+      } else if (evaluate) {
+        source += "';\n" + evaluate + "\n__p+='";
+      }
+
+      // Adobe VMs need the match returned to produce the correct offset.
+      return match;
+    });
+    source += "';\n";
+
+    var argument = settings.variable;
+    if (argument) {
+      // Insure against third-party code injection. (CVE-2021-23358)
+      if (!bareIdentifier.test(argument)) throw new Error(
+        'variable is not a bare identifier: ' + argument
+      );
+    } else {
+      // If a variable is not specified, place data values in local scope.
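+      // Illustrative (editor's sketch): supplying a `variable` setting skips the
+      // `with` block below, e.g.
+      //   _.template('<%= data.answer %>', {variable: 'data'})({answer: 42}); // => '42'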
+ source = 'with(obj||{}){\n' + source + '}\n'; + argument = 'obj'; + } + + source = "var __t,__p='',__j=Array.prototype.join," + + "print=function(){__p+=__j.call(arguments,'');};\n" + + source + 'return __p;\n'; + + var render; + try { + render = new Function(argument, '_', source); + } catch (e) { + e.source = source; + throw e; + } + + var template = function(data) { + return render.call(this, data, _$1); + }; + + // Provide the compiled source as a convenience for precompilation. + template.source = 'function(' + argument + '){\n' + source + '}'; + + return template; + } + + // Traverses the children of `obj` along `path`. If a child is a function, it + // is invoked with its parent as context. Returns the value of the final + // child, or `fallback` if any child is undefined. + function result(obj, path, fallback) { + path = toPath(path); + var length = path.length; + if (!length) { + return isFunction$1(fallback) ? fallback.call(obj) : fallback; + } + for (var i = 0; i < length; i++) { + var prop = obj == null ? void 0 : obj[path[i]]; + if (prop === void 0) { + prop = fallback; + i = length; // Ensure we don't continue iterating. + } + obj = isFunction$1(prop) ? prop.call(obj) : prop; + } + return obj; + } + + // Generate a unique integer id (unique within the entire client session). + // Useful for temporary DOM ids. + var idCounter = 0; + function uniqueId(prefix) { + var id = ++idCounter + ''; + return prefix ? prefix + id : id; + } + + // Start chaining a wrapped Underscore object. + function chain(obj) { + var instance = _$1(obj); + instance._chain = true; + return instance; + } + + // Internal function to execute `sourceFunc` bound to `context` with optional + // `args`. Determines whether to execute a function as a constructor or as a + // normal function. + function executeBound(sourceFunc, boundFunc, context, callingContext, args) { + if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args); + var self = baseCreate(sourceFunc.prototype); + var result = sourceFunc.apply(self, args); + if (isObject(result)) return result; + return self; + } + + // Partially apply a function by creating a version that has had some of its + // arguments pre-filled, without changing its dynamic `this` context. `_` acts + // as a placeholder by default, allowing any combination of arguments to be + // pre-filled. Set `_.partial.placeholder` for a custom placeholder argument. + var partial = restArguments(function(func, boundArgs) { + var placeholder = partial.placeholder; + var bound = function() { + var position = 0, length = boundArgs.length; + var args = Array(length); + for (var i = 0; i < length; i++) { + args[i] = boundArgs[i] === placeholder ? arguments[position++] : boundArgs[i]; + } + while (position < arguments.length) args.push(arguments[position++]); + return executeBound(func, bound, this, this, args); + }; + return bound; + }); + + partial.placeholder = _$1; + + // Create a function bound to a given object (assigning `this`, and arguments, + // optionally). + var bind = restArguments(function(func, context, args) { + if (!isFunction$1(func)) throw new TypeError('Bind must be called on a function'); + var bound = restArguments(function(callArgs) { + return executeBound(func, bound, context, this, args.concat(callArgs)); + }); + return bound; + }); + + // Internal helper for collection methods to determine whether a collection + // should be iterated as an array or as an object. 
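+  // Editor's note (illustrative): arrays, strings and `arguments` objects carry a
+  // numeric `length` in range and are therefore treated as array-like; a plain
+  // object such as {a: 1} is not, so its own keys are enumerated instead.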
+ // Related: https://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength + // Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094 + var isArrayLike = createSizePropertyCheck(getLength); + + // Internal implementation of a recursive `flatten` function. + function flatten$1(input, depth, strict, output) { + output = output || []; + if (!depth && depth !== 0) { + depth = Infinity; + } else if (depth <= 0) { + return output.concat(input); + } + var idx = output.length; + for (var i = 0, length = getLength(input); i < length; i++) { + var value = input[i]; + if (isArrayLike(value) && (isArray(value) || isArguments$1(value))) { + // Flatten current level of array or arguments object. + if (depth > 1) { + flatten$1(value, depth - 1, strict, output); + idx = output.length; + } else { + var j = 0, len = value.length; + while (j < len) output[idx++] = value[j++]; + } + } else if (!strict) { + output[idx++] = value; + } + } + return output; + } + + // Bind a number of an object's methods to that object. Remaining arguments + // are the method names to be bound. Useful for ensuring that all callbacks + // defined on an object belong to it. + var bindAll = restArguments(function(obj, keys) { + keys = flatten$1(keys, false, false); + var index = keys.length; + if (index < 1) throw new Error('bindAll must be passed function names'); + while (index--) { + var key = keys[index]; + obj[key] = bind(obj[key], obj); + } + return obj; + }); + + // Memoize an expensive function by storing its results. + function memoize(func, hasher) { + var memoize = function(key) { + var cache = memoize.cache; + var address = '' + (hasher ? hasher.apply(this, arguments) : key); + if (!has$1(cache, address)) cache[address] = func.apply(this, arguments); + return cache[address]; + }; + memoize.cache = {}; + return memoize; + } + + // Delays a function for the given number of milliseconds, and then calls + // it with the arguments supplied. + var delay = restArguments(function(func, wait, args) { + return setTimeout(function() { + return func.apply(null, args); + }, wait); + }); + + // Defers a function, scheduling it to run after the current call stack has + // cleared. + var defer = partial(delay, _$1, 1); + + // Returns a function, that, when invoked, will only be triggered at most once + // during a given window of time. Normally, the throttled function will run + // as much as it can, without ever going more than once per `wait` duration; + // but if you'd like to disable the execution on the leading edge, pass + // `{leading: false}`. To disable execution on the trailing edge, ditto. + function throttle(func, wait, options) { + var timeout, context, args, result; + var previous = 0; + if (!options) options = {}; + + var later = function() { + previous = options.leading === false ? 
0 : now(); + timeout = null; + result = func.apply(context, args); + if (!timeout) context = args = null; + }; + + var throttled = function() { + var _now = now(); + if (!previous && options.leading === false) previous = _now; + var remaining = wait - (_now - previous); + context = this; + args = arguments; + if (remaining <= 0 || remaining > wait) { + if (timeout) { + clearTimeout(timeout); + timeout = null; + } + previous = _now; + result = func.apply(context, args); + if (!timeout) context = args = null; + } else if (!timeout && options.trailing !== false) { + timeout = setTimeout(later, remaining); + } + return result; + }; + + throttled.cancel = function() { + clearTimeout(timeout); + previous = 0; + timeout = context = args = null; + }; + + return throttled; + } + + // When a sequence of calls of the returned function ends, the argument + // function is triggered. The end of a sequence is defined by the `wait` + // parameter. If `immediate` is passed, the argument function will be + // triggered at the beginning of the sequence instead of at the end. + function debounce(func, wait, immediate) { + var timeout, previous, args, result, context; + + var later = function() { + var passed = now() - previous; + if (wait > passed) { + timeout = setTimeout(later, wait - passed); + } else { + timeout = null; + if (!immediate) result = func.apply(context, args); + // This check is needed because `func` can recursively invoke `debounced`. + if (!timeout) args = context = null; + } + }; + + var debounced = restArguments(function(_args) { + context = this; + args = _args; + previous = now(); + if (!timeout) { + timeout = setTimeout(later, wait); + if (immediate) result = func.apply(context, args); + } + return result; + }); + + debounced.cancel = function() { + clearTimeout(timeout); + timeout = args = context = null; + }; + + return debounced; + } + + // Returns the first function passed as an argument to the second, + // allowing you to adjust arguments, run code before and after, and + // conditionally execute the original function. + function wrap(func, wrapper) { + return partial(wrapper, func); + } + + // Returns a negated version of the passed-in predicate. + function negate(predicate) { + return function() { + return !predicate.apply(this, arguments); + }; + } + + // Returns a function that is the composition of a list of functions, each + // consuming the return value of the function that follows. + function compose() { + var args = arguments; + var start = args.length - 1; + return function() { + var i = start; + var result = args[start].apply(this, arguments); + while (i--) result = args[i].call(this, result); + return result; + }; + } + + // Returns a function that will only be executed on and after the Nth call. + function after(times, func) { + return function() { + if (--times < 1) { + return func.apply(this, arguments); + } + }; + } + + // Returns a function that will only be executed up to (but not including) the + // Nth call. + function before(times, func) { + var memo; + return function() { + if (--times > 0) { + memo = func.apply(this, arguments); + } + if (times <= 1) func = null; + return memo; + }; + } + + // Returns a function that will be executed at most one time, no matter how + // often you call it. Useful for lazy initialization. + var once = partial(before, 2); + + // Returns the first key on an object that passes a truth test. 
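+  // Illustrative usage (editor's sketch):
+  //   findKey({a: 1, b: 4, c: 9}, function(v) { return v > 3; }); // => 'b'
+  //   (returns `undefined` when no value passes the test)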
+ function findKey(obj, predicate, context) { + predicate = cb(predicate, context); + var _keys = keys(obj), key; + for (var i = 0, length = _keys.length; i < length; i++) { + key = _keys[i]; + if (predicate(obj[key], key, obj)) return key; + } + } + + // Internal function to generate `_.findIndex` and `_.findLastIndex`. + function createPredicateIndexFinder(dir) { + return function(array, predicate, context) { + predicate = cb(predicate, context); + var length = getLength(array); + var index = dir > 0 ? 0 : length - 1; + for (; index >= 0 && index < length; index += dir) { + if (predicate(array[index], index, array)) return index; + } + return -1; + }; + } + + // Returns the first index on an array-like that passes a truth test. + var findIndex = createPredicateIndexFinder(1); + + // Returns the last index on an array-like that passes a truth test. + var findLastIndex = createPredicateIndexFinder(-1); + + // Use a comparator function to figure out the smallest index at which + // an object should be inserted so as to maintain order. Uses binary search. + function sortedIndex(array, obj, iteratee, context) { + iteratee = cb(iteratee, context, 1); + var value = iteratee(obj); + var low = 0, high = getLength(array); + while (low < high) { + var mid = Math.floor((low + high) / 2); + if (iteratee(array[mid]) < value) low = mid + 1; else high = mid; + } + return low; + } + + // Internal function to generate the `_.indexOf` and `_.lastIndexOf` functions. + function createIndexFinder(dir, predicateFind, sortedIndex) { + return function(array, item, idx) { + var i = 0, length = getLength(array); + if (typeof idx == 'number') { + if (dir > 0) { + i = idx >= 0 ? idx : Math.max(idx + length, i); + } else { + length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1; + } + } else if (sortedIndex && idx && length) { + idx = sortedIndex(array, item); + return array[idx] === item ? idx : -1; + } + if (item !== item) { + idx = predicateFind(slice.call(array, i, length), isNaN$1); + return idx >= 0 ? idx + i : -1; + } + for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) { + if (array[idx] === item) return idx; + } + return -1; + }; + } + + // Return the position of the first occurrence of an item in an array, + // or -1 if the item is not included in the array. + // If the array is large and already in sort order, pass `true` + // for **isSorted** to use binary search. + var indexOf = createIndexFinder(1, findIndex, sortedIndex); + + // Return the position of the last occurrence of an item in an array, + // or -1 if the item is not included in the array. + var lastIndexOf = createIndexFinder(-1, findLastIndex); + + // Return the first value which passes a truth test. + function find(obj, predicate, context) { + var keyFinder = isArrayLike(obj) ? findIndex : findKey; + var key = keyFinder(obj, predicate, context); + if (key !== void 0 && key !== -1) return obj[key]; + } + + // Convenience version of a common use case of `_.find`: getting the first + // object containing specific `key:value` pairs. + function findWhere(obj, attrs) { + return find(obj, matcher(attrs)); + } + + // The cornerstone for collection functions, an `each` + // implementation, aka `forEach`. + // Handles raw objects in addition to array-likes. Treats all + // sparse array-likes as if they were dense. 
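+  // Illustrative usage (editor's sketch):
+  //   each([1, 2, 3], fn);    // calls fn(1, 0, list), fn(2, 1, list), fn(3, 2, list)
+  //   each({a: 1, b: 2}, fn); // calls fn(value, key, obj) for each own key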
+ function each(obj, iteratee, context) { + iteratee = optimizeCb(iteratee, context); + var i, length; + if (isArrayLike(obj)) { + for (i = 0, length = obj.length; i < length; i++) { + iteratee(obj[i], i, obj); + } + } else { + var _keys = keys(obj); + for (i = 0, length = _keys.length; i < length; i++) { + iteratee(obj[_keys[i]], _keys[i], obj); + } + } + return obj; + } + + // Return the results of applying the iteratee to each element. + function map(obj, iteratee, context) { + iteratee = cb(iteratee, context); + var _keys = !isArrayLike(obj) && keys(obj), + length = (_keys || obj).length, + results = Array(length); + for (var index = 0; index < length; index++) { + var currentKey = _keys ? _keys[index] : index; + results[index] = iteratee(obj[currentKey], currentKey, obj); + } + return results; + } + + // Internal helper to create a reducing function, iterating left or right. + function createReduce(dir) { + // Wrap code that reassigns argument variables in a separate function than + // the one that accesses `arguments.length` to avoid a perf hit. (#1991) + var reducer = function(obj, iteratee, memo, initial) { + var _keys = !isArrayLike(obj) && keys(obj), + length = (_keys || obj).length, + index = dir > 0 ? 0 : length - 1; + if (!initial) { + memo = obj[_keys ? _keys[index] : index]; + index += dir; + } + for (; index >= 0 && index < length; index += dir) { + var currentKey = _keys ? _keys[index] : index; + memo = iteratee(memo, obj[currentKey], currentKey, obj); + } + return memo; + }; + + return function(obj, iteratee, memo, context) { + var initial = arguments.length >= 3; + return reducer(obj, optimizeCb(iteratee, context, 4), memo, initial); + }; + } + + // **Reduce** builds up a single result from a list of values, aka `inject`, + // or `foldl`. + var reduce = createReduce(1); + + // The right-associative version of reduce, also known as `foldr`. + var reduceRight = createReduce(-1); + + // Return all the elements that pass a truth test. + function filter(obj, predicate, context) { + var results = []; + predicate = cb(predicate, context); + each(obj, function(value, index, list) { + if (predicate(value, index, list)) results.push(value); + }); + return results; + } + + // Return all the elements for which a truth test fails. + function reject(obj, predicate, context) { + return filter(obj, negate(cb(predicate)), context); + } + + // Determine whether all of the elements pass a truth test. + function every(obj, predicate, context) { + predicate = cb(predicate, context); + var _keys = !isArrayLike(obj) && keys(obj), + length = (_keys || obj).length; + for (var index = 0; index < length; index++) { + var currentKey = _keys ? _keys[index] : index; + if (!predicate(obj[currentKey], currentKey, obj)) return false; + } + return true; + } + + // Determine if at least one element in the object passes a truth test. + function some(obj, predicate, context) { + predicate = cb(predicate, context); + var _keys = !isArrayLike(obj) && keys(obj), + length = (_keys || obj).length; + for (var index = 0; index < length; index++) { + var currentKey = _keys ? _keys[index] : index; + if (predicate(obj[currentKey], currentKey, obj)) return true; + } + return false; + } + + // Determine if the array or object contains a given item (using `===`). 
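+  // Illustrative usage (editor's sketch):
+  //   contains([1, 2, 3], 3);          // => true
+  //   contains({a: 'x', b: 'y'}, 'y'); // => true (object values are searched)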
+ function contains(obj, item, fromIndex, guard) { + if (!isArrayLike(obj)) obj = values(obj); + if (typeof fromIndex != 'number' || guard) fromIndex = 0; + return indexOf(obj, item, fromIndex) >= 0; + } + + // Invoke a method (with arguments) on every item in a collection. + var invoke = restArguments(function(obj, path, args) { + var contextPath, func; + if (isFunction$1(path)) { + func = path; + } else { + path = toPath(path); + contextPath = path.slice(0, -1); + path = path[path.length - 1]; + } + return map(obj, function(context) { + var method = func; + if (!method) { + if (contextPath && contextPath.length) { + context = deepGet(context, contextPath); + } + if (context == null) return void 0; + method = context[path]; + } + return method == null ? method : method.apply(context, args); + }); + }); + + // Convenience version of a common use case of `_.map`: fetching a property. + function pluck(obj, key) { + return map(obj, property(key)); + } + + // Convenience version of a common use case of `_.filter`: selecting only + // objects containing specific `key:value` pairs. + function where(obj, attrs) { + return filter(obj, matcher(attrs)); + } + + // Return the maximum element (or element-based computation). + function max(obj, iteratee, context) { + var result = -Infinity, lastComputed = -Infinity, + value, computed; + if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) { + obj = isArrayLike(obj) ? obj : values(obj); + for (var i = 0, length = obj.length; i < length; i++) { + value = obj[i]; + if (value != null && value > result) { + result = value; + } + } + } else { + iteratee = cb(iteratee, context); + each(obj, function(v, index, list) { + computed = iteratee(v, index, list); + if (computed > lastComputed || computed === -Infinity && result === -Infinity) { + result = v; + lastComputed = computed; + } + }); + } + return result; + } + + // Return the minimum element (or element-based computation). + function min(obj, iteratee, context) { + var result = Infinity, lastComputed = Infinity, + value, computed; + if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) { + obj = isArrayLike(obj) ? obj : values(obj); + for (var i = 0, length = obj.length; i < length; i++) { + value = obj[i]; + if (value != null && value < result) { + result = value; + } + } + } else { + iteratee = cb(iteratee, context); + each(obj, function(v, index, list) { + computed = iteratee(v, index, list); + if (computed < lastComputed || computed === Infinity && result === Infinity) { + result = v; + lastComputed = computed; + } + }); + } + return result; + } + + // Sample **n** random values from a collection using the modern version of the + // [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher–Yates_shuffle). + // If **n** is not specified, returns a single random element. + // The internal `guard` argument allows it to work with `_.map`. + function sample(obj, n, guard) { + if (n == null || guard) { + if (!isArrayLike(obj)) obj = values(obj); + return obj[random(obj.length - 1)]; + } + var sample = isArrayLike(obj) ? clone(obj) : values(obj); + var length = getLength(sample); + n = Math.max(Math.min(n, length), 0); + var last = length - 1; + for (var index = 0; index < n; index++) { + var rand = random(index, last); + var temp = sample[index]; + sample[index] = sample[rand]; + sample[rand] = temp; + } + return sample.slice(0, n); + } + + // Shuffle a collection. 
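+  // Editor's note (illustrative): delegates to `sample` with n = Infinity, so a
+  // new randomly ordered copy is returned and the input is left unmodified, e.g.
+  //   shuffle([1, 2, 3, 4]); // => e.g. [3, 1, 4, 2]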
+ function shuffle(obj) { + return sample(obj, Infinity); + } + + // Sort the object's values by a criterion produced by an iteratee. + function sortBy(obj, iteratee, context) { + var index = 0; + iteratee = cb(iteratee, context); + return pluck(map(obj, function(value, key, list) { + return { + value: value, + index: index++, + criteria: iteratee(value, key, list) + }; + }).sort(function(left, right) { + var a = left.criteria; + var b = right.criteria; + if (a !== b) { + if (a > b || a === void 0) return 1; + if (a < b || b === void 0) return -1; + } + return left.index - right.index; + }), 'value'); + } + + // An internal function used for aggregate "group by" operations. + function group(behavior, partition) { + return function(obj, iteratee, context) { + var result = partition ? [[], []] : {}; + iteratee = cb(iteratee, context); + each(obj, function(value, index) { + var key = iteratee(value, index, obj); + behavior(result, value, key); + }); + return result; + }; + } + + // Groups the object's values by a criterion. Pass either a string attribute + // to group by, or a function that returns the criterion. + var groupBy = group(function(result, value, key) { + if (has$1(result, key)) result[key].push(value); else result[key] = [value]; + }); + + // Indexes the object's values by a criterion, similar to `_.groupBy`, but for + // when you know that your index values will be unique. + var indexBy = group(function(result, value, key) { + result[key] = value; + }); + + // Counts instances of an object that group by a certain criterion. Pass + // either a string attribute to count by, or a function that returns the + // criterion. + var countBy = group(function(result, value, key) { + if (has$1(result, key)) result[key]++; else result[key] = 1; + }); + + // Split a collection into two arrays: one whose elements all pass the given + // truth test, and one whose elements all do not pass the truth test. + var partition = group(function(result, value, pass) { + result[pass ? 0 : 1].push(value); + }, true); + + // Safely create a real, live array from anything iterable. + var reStrSymbol = /[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g; + function toArray(obj) { + if (!obj) return []; + if (isArray(obj)) return slice.call(obj); + if (isString(obj)) { + // Keep surrogate pair characters together. + return obj.match(reStrSymbol); + } + if (isArrayLike(obj)) return map(obj, identity); + return values(obj); + } + + // Return the number of elements in a collection. + function size(obj) { + if (obj == null) return 0; + return isArrayLike(obj) ? obj.length : keys(obj).length; + } + + // Internal `_.pick` helper function to determine whether `key` is an enumerable + // property name of `obj`. + function keyInObj(value, key, obj) { + return key in obj; + } + + // Return a copy of the object only containing the allowed properties. + var pick = restArguments(function(obj, keys) { + var result = {}, iteratee = keys[0]; + if (obj == null) return result; + if (isFunction$1(iteratee)) { + if (keys.length > 1) iteratee = optimizeCb(iteratee, keys[1]); + keys = allKeys(obj); + } else { + iteratee = keyInObj; + keys = flatten$1(keys, false, false); + obj = Object(obj); + } + for (var i = 0, length = keys.length; i < length; i++) { + var key = keys[i]; + var value = obj[key]; + if (iteratee(value, key, obj)) result[key] = value; + } + return result; + }); + + // Return a copy of the object without the disallowed properties. 
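+  // Illustrative usage (editor's sketch):
+  //   omit({name: 'moe', age: 50, userid: 'moe1'}, 'userid');
+  //   // => {name: 'moe', age: 50}
+  //   omit({name: 'moe', age: 50, userid: 'moe1'}, function(value) { return isNumber(value); });
+  //   // => {name: 'moe', userid: 'moe1'}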
+ var omit = restArguments(function(obj, keys) { + var iteratee = keys[0], context; + if (isFunction$1(iteratee)) { + iteratee = negate(iteratee); + if (keys.length > 1) context = keys[1]; + } else { + keys = map(flatten$1(keys, false, false), String); + iteratee = function(value, key) { + return !contains(keys, key); + }; + } + return pick(obj, iteratee, context); + }); + + // Returns everything but the last entry of the array. Especially useful on + // the arguments object. Passing **n** will return all the values in + // the array, excluding the last N. + function initial(array, n, guard) { + return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n))); + } + + // Get the first element of an array. Passing **n** will return the first N + // values in the array. The **guard** check allows it to work with `_.map`. + function first(array, n, guard) { + if (array == null || array.length < 1) return n == null || guard ? void 0 : []; + if (n == null || guard) return array[0]; + return initial(array, array.length - n); + } + + // Returns everything but the first entry of the `array`. Especially useful on + // the `arguments` object. Passing an **n** will return the rest N values in the + // `array`. + function rest(array, n, guard) { + return slice.call(array, n == null || guard ? 1 : n); + } + + // Get the last element of an array. Passing **n** will return the last N + // values in the array. + function last(array, n, guard) { + if (array == null || array.length < 1) return n == null || guard ? void 0 : []; + if (n == null || guard) return array[array.length - 1]; + return rest(array, Math.max(0, array.length - n)); + } + + // Trim out all falsy values from an array. + function compact(array) { + return filter(array, Boolean); + } + + // Flatten out an array, either recursively (by default), or up to `depth`. + // Passing `true` or `false` as `depth` means `1` or `Infinity`, respectively. + function flatten(array, depth) { + return flatten$1(array, depth, false); + } + + // Take the difference between one array and a number of other arrays. + // Only the elements present in just the first array will remain. + var difference = restArguments(function(array, rest) { + rest = flatten$1(rest, true, true); + return filter(array, function(value){ + return !contains(rest, value); + }); + }); + + // Return a version of the array that does not contain the specified value(s). + var without = restArguments(function(array, otherArrays) { + return difference(array, otherArrays); + }); + + // Produce a duplicate-free version of the array. If the array has already + // been sorted, you have the option of using a faster algorithm. + // The faster algorithm will not work with an iteratee if the iteratee + // is not a one-to-one function, so providing an iteratee will disable + // the faster algorithm. + function uniq(array, isSorted, iteratee, context) { + if (!isBoolean(isSorted)) { + context = iteratee; + iteratee = isSorted; + isSorted = false; + } + if (iteratee != null) iteratee = cb(iteratee, context); + var result = []; + var seen = []; + for (var i = 0, length = getLength(array); i < length; i++) { + var value = array[i], + computed = iteratee ? 
iteratee(value, i, array) : value; + if (isSorted && !iteratee) { + if (!i || seen !== computed) result.push(value); + seen = computed; + } else if (iteratee) { + if (!contains(seen, computed)) { + seen.push(computed); + result.push(value); + } + } else if (!contains(result, value)) { + result.push(value); + } + } + return result; + } + + // Produce an array that contains the union: each distinct element from all of + // the passed-in arrays. + var union = restArguments(function(arrays) { + return uniq(flatten$1(arrays, true, true)); + }); + + // Produce an array that contains every item shared between all the + // passed-in arrays. + function intersection(array) { + var result = []; + var argsLength = arguments.length; + for (var i = 0, length = getLength(array); i < length; i++) { + var item = array[i]; + if (contains(result, item)) continue; + var j; + for (j = 1; j < argsLength; j++) { + if (!contains(arguments[j], item)) break; + } + if (j === argsLength) result.push(item); + } + return result; + } + + // Complement of zip. Unzip accepts an array of arrays and groups + // each array's elements on shared indices. + function unzip(array) { + var length = array && max(array, getLength).length || 0; + var result = Array(length); + + for (var index = 0; index < length; index++) { + result[index] = pluck(array, index); + } + return result; + } + + // Zip together multiple lists into a single array -- elements that share + // an index go together. + var zip = restArguments(unzip); + + // Converts lists into objects. Pass either a single array of `[key, value]` + // pairs, or two parallel arrays of the same length -- one of keys, and one of + // the corresponding values. Passing by pairs is the reverse of `_.pairs`. + function object(list, values) { + var result = {}; + for (var i = 0, length = getLength(list); i < length; i++) { + if (values) { + result[list[i]] = values[i]; + } else { + result[list[i][0]] = list[i][1]; + } + } + return result; + } + + // Generate an integer Array containing an arithmetic progression. A port of + // the native Python `range()` function. See + // [the Python documentation](https://docs.python.org/library/functions.html#range). + function range(start, stop, step) { + if (stop == null) { + stop = start || 0; + start = 0; + } + if (!step) { + step = stop < start ? -1 : 1; + } + + var length = Math.max(Math.ceil((stop - start) / step), 0); + var range = Array(length); + + for (var idx = 0; idx < length; idx++, start += step) { + range[idx] = start; + } + + return range; + } + + // Chunk a single array into multiple arrays, each containing `count` or fewer + // items. + function chunk(array, count) { + if (count == null || count < 1) return []; + var result = []; + var i = 0, length = array.length; + while (i < length) { + result.push(slice.call(array, i, i += count)); + } + return result; + } + + // Helper function to continue chaining intermediate results. + function chainResult(instance, obj) { + return instance._chain ? _$1(obj).chain() : obj; + } + + // Add your own custom functions to the Underscore object. + function mixin(obj) { + each(functions(obj), function(name) { + var func = _$1[name] = obj[name]; + _$1.prototype[name] = function() { + var args = [this._wrapped]; + push.apply(args, arguments); + return chainResult(this, func.apply(_$1, args)); + }; + }); + return _$1; + } + + // Add all mutator `Array` functions to the wrapper. 
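+  // Editor's note (illustrative): this exposes the native mutators on wrapped
+  // objects, e.g.
+  //   _([3, 1, 2]).sort();                         // sorts in place, returns [1, 2, 3]
+  //   _([3, 1, 2]).chain().sort().first().value(); // => 1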
+ each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) { + var method = ArrayProto[name]; + _$1.prototype[name] = function() { + var obj = this._wrapped; + if (obj != null) { + method.apply(obj, arguments); + if ((name === 'shift' || name === 'splice') && obj.length === 0) { + delete obj[0]; + } + } + return chainResult(this, obj); + }; + }); + + // Add all accessor `Array` functions to the wrapper. + each(['concat', 'join', 'slice'], function(name) { + var method = ArrayProto[name]; + _$1.prototype[name] = function() { + var obj = this._wrapped; + if (obj != null) obj = method.apply(obj, arguments); + return chainResult(this, obj); + }; + }); + + // Named Exports + + var allExports = { + __proto__: null, + VERSION: VERSION, + restArguments: restArguments, + isObject: isObject, + isNull: isNull, + isUndefined: isUndefined, + isBoolean: isBoolean, + isElement: isElement, + isString: isString, + isNumber: isNumber, + isDate: isDate, + isRegExp: isRegExp, + isError: isError, + isSymbol: isSymbol, + isArrayBuffer: isArrayBuffer, + isDataView: isDataView$1, + isArray: isArray, + isFunction: isFunction$1, + isArguments: isArguments$1, + isFinite: isFinite$1, + isNaN: isNaN$1, + isTypedArray: isTypedArray$1, + isEmpty: isEmpty, + isMatch: isMatch, + isEqual: isEqual, + isMap: isMap, + isWeakMap: isWeakMap, + isSet: isSet, + isWeakSet: isWeakSet, + keys: keys, + allKeys: allKeys, + values: values, + pairs: pairs, + invert: invert, + functions: functions, + methods: functions, + extend: extend, + extendOwn: extendOwn, + assign: extendOwn, + defaults: defaults, + create: create, + clone: clone, + tap: tap, + get: get, + has: has, + mapObject: mapObject, + identity: identity, + constant: constant, + noop: noop, + toPath: toPath$1, + property: property, + propertyOf: propertyOf, + matcher: matcher, + matches: matcher, + times: times, + random: random, + now: now, + escape: _escape, + unescape: _unescape, + templateSettings: templateSettings, + template: template, + result: result, + uniqueId: uniqueId, + chain: chain, + iteratee: iteratee, + partial: partial, + bind: bind, + bindAll: bindAll, + memoize: memoize, + delay: delay, + defer: defer, + throttle: throttle, + debounce: debounce, + wrap: wrap, + negate: negate, + compose: compose, + after: after, + before: before, + once: once, + findKey: findKey, + findIndex: findIndex, + findLastIndex: findLastIndex, + sortedIndex: sortedIndex, + indexOf: indexOf, + lastIndexOf: lastIndexOf, + find: find, + detect: find, + findWhere: findWhere, + each: each, + forEach: each, + map: map, + collect: map, + reduce: reduce, + foldl: reduce, + inject: reduce, + reduceRight: reduceRight, + foldr: reduceRight, + filter: filter, + select: filter, + reject: reject, + every: every, + all: every, + some: some, + any: some, + contains: contains, + includes: contains, + include: contains, + invoke: invoke, + pluck: pluck, + where: where, + max: max, + min: min, + shuffle: shuffle, + sample: sample, + sortBy: sortBy, + groupBy: groupBy, + indexBy: indexBy, + countBy: countBy, + partition: partition, + toArray: toArray, + size: size, + pick: pick, + omit: omit, + first: first, + head: first, + take: first, + initial: initial, + last: last, + rest: rest, + tail: rest, + drop: rest, + compact: compact, + flatten: flatten, + without: without, + uniq: uniq, + unique: uniq, + union: union, + intersection: intersection, + difference: difference, + unzip: unzip, + transpose: unzip, + zip: zip, + object: object, + range: range, + chunk: chunk, + 
mixin: mixin, + 'default': _$1 + }; + + // Default Export + + // Add all of the Underscore functions to the wrapper object. + var _ = mixin(allExports); + // Legacy Node.js API. + _._ = _; + + return _; + +}))); +//# sourceMappingURL=underscore-umd.js.map diff --git a/doc/build/html/_static/up-pressed.png b/doc/build/html/_static/up-pressed.png deleted file mode 100644 index acee3b68..00000000 Binary files a/doc/build/html/_static/up-pressed.png and /dev/null differ diff --git a/doc/build/html/_static/up.png b/doc/build/html/_static/up.png deleted file mode 100644 index 2a940a7d..00000000 Binary files a/doc/build/html/_static/up.png and /dev/null differ diff --git a/doc/build/html/_static/watermark.png b/doc/build/html/_static/watermark.png deleted file mode 100644 index d71dc4bb..00000000 Binary files a/doc/build/html/_static/watermark.png and /dev/null differ diff --git a/doc/build/html/_static/watermark_blur.png b/doc/build/html/_static/watermark_blur.png deleted file mode 100644 index 9fc0b6d3..00000000 Binary files a/doc/build/html/_static/watermark_blur.png and /dev/null differ diff --git a/doc/build/html/_static/websupport.js b/doc/build/html/_static/websupport.js deleted file mode 100644 index 98e7f40b..00000000 --- a/doc/build/html/_static/websupport.js +++ /dev/null @@ -1,808 +0,0 @@ -/* - * websupport.js - * ~~~~~~~~~~~~~ - * - * sphinx.websupport utilities for all documentation. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -(function($) { - $.fn.autogrow = function() { - return this.each(function() { - var textarea = this; - - $.fn.autogrow.resize(textarea); - - $(textarea) - .focus(function() { - textarea.interval = setInterval(function() { - $.fn.autogrow.resize(textarea); - }, 500); - }) - .blur(function() { - clearInterval(textarea.interval); - }); - }); - }; - - $.fn.autogrow.resize = function(textarea) { - var lineHeight = parseInt($(textarea).css('line-height'), 10); - var lines = textarea.value.split('\n'); - var columns = textarea.cols; - var lineCount = 0; - $.each(lines, function() { - lineCount += Math.ceil(this.length / columns) || 1; - }); - var height = lineHeight * (lineCount + 1); - $(textarea).css('height', height); - }; -})(jQuery); - -(function($) { - var comp, by; - - function init() { - initEvents(); - initComparator(); - } - - function initEvents() { - $(document).on("click", 'a.comment-close', function(event) { - event.preventDefault(); - hide($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.vote', function(event) { - event.preventDefault(); - handleVote($(this)); - }); - $(document).on("click", 'a.reply', function(event) { - event.preventDefault(); - openReply($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.close-reply', function(event) { - event.preventDefault(); - closeReply($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.sort-option', function(event) { - event.preventDefault(); - handleReSort($(this)); - }); - $(document).on("click", 'a.show-proposal', function(event) { - event.preventDefault(); - showProposal($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.hide-proposal', function(event) { - event.preventDefault(); - hideProposal($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.show-propose-change', function(event) { - event.preventDefault(); - showProposeChange($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.hide-propose-change', function(event) 
{ - event.preventDefault(); - hideProposeChange($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.accept-comment', function(event) { - event.preventDefault(); - acceptComment($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.delete-comment', function(event) { - event.preventDefault(); - deleteComment($(this).attr('id').substring(2)); - }); - $(document).on("click", 'a.comment-markup', function(event) { - event.preventDefault(); - toggleCommentMarkupBox($(this).attr('id').substring(2)); - }); - } - - /** - * Set comp, which is a comparator function used for sorting and - * inserting comments into the list. - */ - function setComparator() { - // If the first three letters are "asc", sort in ascending order - // and remove the prefix. - if (by.substring(0,3) == 'asc') { - var i = by.substring(3); - comp = function(a, b) { return a[i] - b[i]; }; - } else { - // Otherwise sort in descending order. - comp = function(a, b) { return b[by] - a[by]; }; - } - - // Reset link styles and format the selected sort option. - $('a.sel').attr('href', '#').removeClass('sel'); - $('a.by' + by).removeAttr('href').addClass('sel'); - } - - /** - * Create a comp function. If the user has preferences stored in - * the sortBy cookie, use those, otherwise use the default. - */ - function initComparator() { - by = 'rating'; // Default to sort by rating. - // If the sortBy cookie is set, use that instead. - if (document.cookie.length > 0) { - var start = document.cookie.indexOf('sortBy='); - if (start != -1) { - start = start + 7; - var end = document.cookie.indexOf(";", start); - if (end == -1) { - end = document.cookie.length; - by = unescape(document.cookie.substring(start, end)); - } - } - } - setComparator(); - } - - /** - * Show a comment div. - */ - function show(id) { - $('#ao' + id).hide(); - $('#ah' + id).show(); - var context = $.extend({id: id}, opts); - var popup = $(renderTemplate(popupTemplate, context)).hide(); - popup.find('textarea[name="proposal"]').hide(); - popup.find('a.by' + by).addClass('sel'); - var form = popup.find('#cf' + id); - form.submit(function(event) { - event.preventDefault(); - addComment(form); - }); - $('#s' + id).after(popup); - popup.slideDown('fast', function() { - getComments(id); - }); - } - - /** - * Hide a comment div. - */ - function hide(id) { - $('#ah' + id).hide(); - $('#ao' + id).show(); - var div = $('#sc' + id); - div.slideUp('fast', function() { - div.remove(); - }); - } - - /** - * Perform an ajax request to get comments for a node - * and insert the comments into the comments tree. - */ - function getComments(id) { - $.ajax({ - type: 'GET', - url: opts.getCommentsURL, - data: {node: id}, - success: function(data, textStatus, request) { - var ul = $('#cl' + id); - var speed = 100; - $('#cf' + id) - .find('textarea[name="proposal"]') - .data('source', data.source); - - if (data.comments.length === 0) { - ul.html('
<li>No comments yet.</li>
  • '); - ul.data('empty', true); - } else { - // If there are comments, sort them and put them in the list. - var comments = sortComments(data.comments); - speed = data.comments.length * 100; - appendComments(comments, ul); - ul.data('empty', false); - } - $('#cn' + id).slideUp(speed + 200); - ul.slideDown(speed); - }, - error: function(request, textStatus, error) { - showError('Oops, there was a problem retrieving the comments.'); - }, - dataType: 'json' - }); - } - - /** - * Add a comment via ajax and insert the comment into the comment tree. - */ - function addComment(form) { - var node_id = form.find('input[name="node"]').val(); - var parent_id = form.find('input[name="parent"]').val(); - var text = form.find('textarea[name="comment"]').val(); - var proposal = form.find('textarea[name="proposal"]').val(); - - if (text == '') { - showError('Please enter a comment.'); - return; - } - - // Disable the form that is being submitted. - form.find('textarea,input').attr('disabled', 'disabled'); - - // Send the comment to the server. - $.ajax({ - type: "POST", - url: opts.addCommentURL, - dataType: 'json', - data: { - node: node_id, - parent: parent_id, - text: text, - proposal: proposal - }, - success: function(data, textStatus, error) { - // Reset the form. - if (node_id) { - hideProposeChange(node_id); - } - form.find('textarea') - .val('') - .add(form.find('input')) - .removeAttr('disabled'); - var ul = $('#cl' + (node_id || parent_id)); - if (ul.data('empty')) { - $(ul).empty(); - ul.data('empty', false); - } - insertComment(data.comment); - var ao = $('#ao' + node_id); - ao.find('img').attr({'src': opts.commentBrightImage}); - if (node_id) { - // if this was a "root" comment, remove the commenting box - // (the user can get it back by reopening the comment popup) - $('#ca' + node_id).slideUp(); - } - }, - error: function(request, textStatus, error) { - form.find('textarea,input').removeAttr('disabled'); - showError('Oops, there was a problem adding the comment.'); - } - }); - } - - /** - * Recursively append comments to the main comment list and children - * lists, creating the comment tree. - */ - function appendComments(comments, ul) { - $.each(comments, function() { - var div = createCommentDiv(this); - ul.append($(document.createElement('li')).html(div)); - appendComments(this.children, div.find('ul.comment-children')); - // To avoid stagnating data, don't store the comments children in data. - this.children = null; - div.data('comment', this); - }); - } - - /** - * After adding a new comment, it must be inserted in the correct - * location in the comment tree. - */ - function insertComment(comment) { - var div = createCommentDiv(comment); - - // To avoid stagnating data, don't store the comments children in data. - comment.children = null; - div.data('comment', comment); - - var ul = $('#cl' + (comment.node || comment.parent)); - var siblings = getChildren(ul); - - var li = $(document.createElement('li')); - li.hide(); - - // Determine where in the parents children list to insert this comment. - for(i=0; i < siblings.length; i++) { - if (comp(comment, siblings[i]) <= 0) { - $('#cd' + siblings[i].id) - .parent() - .before(li.html(div)); - li.slideDown('fast'); - return; - } - } - - // If we get here, this comment rates lower than all the others, - // or it is the only comment in the list. 
- ul.append(li.html(div)); - li.slideDown('fast'); - } - - function acceptComment(id) { - $.ajax({ - type: 'POST', - url: opts.acceptCommentURL, - data: {id: id}, - success: function(data, textStatus, request) { - $('#cm' + id).fadeOut('fast'); - $('#cd' + id).removeClass('moderate'); - }, - error: function(request, textStatus, error) { - showError('Oops, there was a problem accepting the comment.'); - } - }); - } - - function deleteComment(id) { - $.ajax({ - type: 'POST', - url: opts.deleteCommentURL, - data: {id: id}, - success: function(data, textStatus, request) { - var div = $('#cd' + id); - if (data == 'delete') { - // Moderator mode: remove the comment and all children immediately - div.slideUp('fast', function() { - div.remove(); - }); - return; - } - // User mode: only mark the comment as deleted - div - .find('span.user-id:first') - .text('[deleted]').end() - .find('div.comment-text:first') - .text('[deleted]').end() - .find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id + - ', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id) - .remove(); - var comment = div.data('comment'); - comment.username = '[deleted]'; - comment.text = '[deleted]'; - div.data('comment', comment); - }, - error: function(request, textStatus, error) { - showError('Oops, there was a problem deleting the comment.'); - } - }); - } - - function showProposal(id) { - $('#sp' + id).hide(); - $('#hp' + id).show(); - $('#pr' + id).slideDown('fast'); - } - - function hideProposal(id) { - $('#hp' + id).hide(); - $('#sp' + id).show(); - $('#pr' + id).slideUp('fast'); - } - - function showProposeChange(id) { - $('#pc' + id).hide(); - $('#hc' + id).show(); - var textarea = $('#pt' + id); - textarea.val(textarea.data('source')); - $.fn.autogrow.resize(textarea[0]); - textarea.slideDown('fast'); - } - - function hideProposeChange(id) { - $('#hc' + id).hide(); - $('#pc' + id).show(); - var textarea = $('#pt' + id); - textarea.val('').removeAttr('disabled'); - textarea.slideUp('fast'); - } - - function toggleCommentMarkupBox(id) { - $('#mb' + id).toggle(); - } - - /** Handle when the user clicks on a sort by link. */ - function handleReSort(link) { - var classes = link.attr('class').split(/\s+/); - for (var i=0; iThank you! Your comment will show up ' - + 'once it is has been approved by a moderator.
    '); - } - // Prettify the comment rating. - comment.pretty_rating = comment.rating + ' point' + - (comment.rating == 1 ? '' : 's'); - // Make a class (for displaying not yet moderated comments differently) - comment.css_class = comment.displayed ? '' : ' moderate'; - // Create a div for this comment. - var context = $.extend({}, opts, comment); - var div = $(renderTemplate(commentTemplate, context)); - - // If the user has voted on this comment, highlight the correct arrow. - if (comment.vote) { - var direction = (comment.vote == 1) ? 'u' : 'd'; - div.find('#' + direction + 'v' + comment.id).hide(); - div.find('#' + direction + 'u' + comment.id).show(); - } - - if (opts.moderator || comment.text != '[deleted]') { - div.find('a.reply').show(); - if (comment.proposal_diff) - div.find('#sp' + comment.id).show(); - if (opts.moderator && !comment.displayed) - div.find('#cm' + comment.id).show(); - if (opts.moderator || (opts.username == comment.username)) - div.find('#dc' + comment.id).show(); - } - return div; - } - - /** - * A simple template renderer. Placeholders such as <%id%> are replaced - * by context['id'] with items being escaped. Placeholders such as <#id#> - * are not escaped. - */ - function renderTemplate(template, context) { - var esc = $(document.createElement('div')); - - function handle(ph, escape) { - var cur = context; - $.each(ph.split('.'), function() { - cur = cur[this]; - }); - return escape ? esc.text(cur || "").html() : cur; - } - - return template.replace(/<([%#])([\w\.]*)\1>/g, function() { - return handle(arguments[2], arguments[1] == '%' ? true : false); - }); - } - - /** Flash an error message briefly. */ - function showError(message) { - $(document.createElement('div')).attr({'class': 'popup-error'}) - .append($(document.createElement('div')) - .attr({'class': 'error-message'}).text(message)) - .appendTo('body') - .fadeIn("slow") - .delay(2000) - .fadeOut("slow"); - } - - /** Add a link the user uses to open the comments popup. */ - $.fn.comment = function() { - return this.each(function() { - var id = $(this).attr('id').substring(1); - var count = COMMENT_METADATA[id]; - var title = count + ' comment' + (count == 1 ? '' : 's'); - var image = count > 0 ? opts.commentBrightImage : opts.commentImage; - var addcls = count == 0 ? 
' nocomment' : ''; - $(this) - .append( - $(document.createElement('a')).attr({ - href: '#', - 'class': 'sphinx-comment-open' + addcls, - id: 'ao' + id - }) - .append($(document.createElement('img')).attr({ - src: image, - alt: 'comment', - title: title - })) - .click(function(event) { - event.preventDefault(); - show($(this).attr('id').substring(2)); - }) - ) - .append( - $(document.createElement('a')).attr({ - href: '#', - 'class': 'sphinx-comment-close hidden', - id: 'ah' + id - }) - .append($(document.createElement('img')).attr({ - src: opts.closeCommentImage, - alt: 'close', - title: 'close' - })) - .click(function(event) { - event.preventDefault(); - hide($(this).attr('id').substring(2)); - }) - ); - }); - }; - - var opts = { - processVoteURL: '/_process_vote', - addCommentURL: '/_add_comment', - getCommentsURL: '/_get_comments', - acceptCommentURL: '/_accept_comment', - deleteCommentURL: '/_delete_comment', - commentImage: '/static/_static/comment.png', - closeCommentImage: '/static/_static/comment-close.png', - loadingImage: '/static/_static/ajax-loader.gif', - commentBrightImage: '/static/_static/comment-bright.png', - upArrow: '/static/_static/up.png', - downArrow: '/static/_static/down.png', - upArrowPressed: '/static/_static/up-pressed.png', - downArrowPressed: '/static/_static/down-pressed.png', - voting: false, - moderator: false - }; - - if (typeof COMMENT_OPTIONS != "undefined") { - opts = jQuery.extend(opts, COMMENT_OPTIONS); - } - - var popupTemplate = '\ -
-      ...[multi-line HTML string for the comment popup: "Sort by" links
-      (best rated / newest / oldest), the comment list with its
-      "loading comments..." indicator, the "Add a comment" textarea with its
-      reStructuredText markup hint (*emph*, **strong**, ``code``, :: code blocks)
-      and the "Propose a change" toggle with its proposal textarea]...';
-
-  var commentTemplate = '\
-      ...[multi-line HTML string for a single rendered comment: vote arrows,
-      the <%username%>, <%pretty_rating%> and <%time.delta%> placeholders,
-      the <#text#> body, reply / proposal links and the <#proposal_diff#> block]...';
-
-  var replyTemplate = '\
-      ...[multi-line HTML string for the inline reply form with its textarea
-      and submit / cancel controls]...\
      • '; - - $(document).ready(function() { - init(); - }); -})(jQuery); - -$(document).ready(function() { - // add comment anchors for all paragraphs that are commentable - $('.sphinx-has-comment').comment(); - - // highlight search words in search results - $("div.context").each(function() { - var params = $.getQueryParameters(); - var terms = (params.q) ? params.q[0].split(/\s+/) : []; - var result = $(this); - $.each(terms, function() { - result.highlightText(this.toLowerCase(), 'highlighted'); - }); - }); - - // directly open comment window if requested - var anchor = document.location.hash; - if (anchor.substring(0, 9) == '#comment-') { - $('#ao' + anchor.substring(9)).click(); - document.location.hash = '#s' + anchor.substring(9); - } -}); diff --git a/doc/build/html/_templates/class.html b/doc/build/html/_templates/class.html new file mode 100644 index 00000000..c1c91f3e --- /dev/null +++ b/doc/build/html/_templates/class.html @@ -0,0 +1,235 @@ + + + + + + + + + + <no title> — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+      [Read the Docs theme page shell for the Sphinx autosummary *class*
+      template: navigation sidebar and breadcrumb markup wrapped around the
+      "{{ fullname }}" / "{{ underline }}" placeholders, followed by the
+      "© Copyright 2020, Andre F. Marquand." notice and the standard
+      "Built with Sphinx using a theme provided by Read the Docs." footer]
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/_templates/function.html b/doc/build/html/_templates/function.html new file mode 100644 index 00000000..82b8f580 --- /dev/null +++ b/doc/build/html/_templates/function.html @@ -0,0 +1,235 @@ + + + + + + + + + + <no title> — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+      [Read the Docs theme page shell for the Sphinx autosummary *function*
+      template: the same navigation markup around the "{{ fullname }}" /
+      "{{ underline }}" placeholders, the "© Copyright 2020, Andre F. Marquand."
+      notice and the Read the Docs theme footer]
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/genindex.html b/doc/build/html/genindex.html index ca00b338..985ea964 100644 --- a/doc/build/html/genindex.html +++ b/doc/build/html/genindex.html @@ -1,46 +1,188 @@ - - - - - - - Index — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + Index — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -
      [diff hunks over the regenerated general index page: the 0.17 page shell
      is replaced by Read the Docs theme markup for the 0.20 docs, and the
      alphabetical index entries (A through W, e.g. "gp", "GPR (class in gp)",
      "GPRRFA (class in rfa)", "normative", "normative_parallel") are re-laid
      out, ending with the 2020 copyright notice and the Read the Docs theme
      footer]
        + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/index.html b/doc/build/html/index.html index ce5f1cab..903d1ff9 100644 --- a/doc/build/html/index.html +++ b/doc/build/html/index.html @@ -1,125 +1,283 @@ - - - - - - - Predictive Clinical Neuroscience toolkit — Predictive Clinical Neuroscience Toolkit 0.17 documentation - - + + + + + + + + Predictive Clinical Neuroscience toolkit — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - -
[regenerated landing page: contents listing Getting started, Function & Class Docs, and Current Events]
diff --git a/doc/build/html/modindex.html b/doc/build/html/modindex.html
index 320674cc..349bff04 100644
--- a/doc/build/html/modindex.html
+++ b/doc/build/html/modindex.html
@@ -1,63 +1,204 @@
-Module Index — Predictive Clinical Neuroscience Toolkit 0.17 documentation
+Module Index — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Module Index

        -class bayesreg.BLR(hyp=None, X=None, y=None, n_iter=100, tol=0.001, verbose=False, var_groups=None, warp=None)[source]
        +class bayesreg.BLR(**kwargs)[source]

        Bases: object

        Bayesian linear regression

        Estimation and prediction of Bayesian linear regression models

        Basic usage:

        -
        B = BLR()
        -hyp = B.estimate(hyp0, X, y)
        -ys,s2 = B.predict(hyp, X, y, Xs)
        +
        B = BLR()
        +hyp = B.estimate(hyp0, X, y)
        +ys,s2 = B.predict(hyp, X, y, Xs)
         

        where the variables are

        @@ -80,7 +221,7 @@

        Navigation

        The hyperparameters are:

        -
        hyp = ( log(beta), log(alpha) )  # hyp is a list or numpy array
        +
        hyp = ( log(beta), log(alpha) )  # hyp is a list or numpy array
         

        The implementation and notation mostly follows Bishop (2006). @@ -95,35 +236,126 @@

        Navigation

        Written by A. Marquand
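A minimal sketch of this usage pattern on synthetic data is shown below; the import path and the size of hyp0 are assumptions (older layouts expose the module directly as bayesreg).

# Minimal sketch of the basic BLR usage above (synthetic data; paths and shapes are assumptions)
import numpy as np
from pcntoolkit.model.bayesreg import BLR   # or: from bayesreg import BLR, depending on version

rng = np.random.RandomState(0)
N, D = 100, 3
X = rng.randn(N, D)                         # N x D data array
y = X @ rng.randn(D) + 0.1 * rng.randn(N)   # 1D array of targets (length N)
Xs = rng.randn(20, D)                       # Nte x D array of test cases

B = BLR()
hyp0 = np.zeros(2)                          # starting estimates for (log(beta), log(alpha))
hyp = B.estimate(hyp0, X, y)
ys, s2 = B.predict(hyp, X, y, Xs)           # predictive mean and variance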

        -dloglik(hyp, X, y)[source]
        +dloglik(hyp, X, y, Xv=None)[source]

        Function to compute derivatives

        -estimate(hyp0, X, y, optimizer='cg')[source]
        +estimate(hyp0, X, y, **kwargs)[source]

        Function to estimate the model

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates

        • +
        • y – responses

        • +
        • optimizer – optimisation algorithm (‘cg’,’powell’,’nelder-mead’,’l0bfgs-b’)

        • +
        +
        +
        -loglik(hyp, X, y)[source]
        +loglik(hyp, X, y, Xv=None)[source]

Function to compute the log (marginal) likelihood

        +
        +
        +penalized_loglik(hyp, X, y, Xv=None, l=0.1, norm='L1')[source]
        +

        Function to compute the penalized log (marginal) likelihood

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates

        • +
        • y – responses

        • +
        • Xv – covariates for heteroskedastic noise

        • +
        • l – regularisation penalty

        • +
        • norm – type of regulariser (L1 or L2)

        • +
        +
        +
        +
        +
        -post(hyp, X, y)[source]
        +post(hyp, X, y, Xv=None)[source]

        Generic function to compute posterior distribution.

        This function will save the posterior mean and precision matrix as self.m and self.A and will also update internal parameters (e.g. N, D and the prior covariance (Sigma_a) and precision (Lambda_a).

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates

        • +
        • y – responses

        • +
        • Xv – covariates for heteroskedastic noise

        • +
        +
        +
        -predict(hyp, X, y, Xs, var_groups_test=None)[source]
        +predict(hyp, X, y, Xs, var_groups_test=None, var_covariates_test=None, **kwargs)[source]

        Function to make predictions from the model

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates for training data

        • +
        • y – responses for training data

        • +
        • Xs – covariates for test data

        • +
        • var_covariates_test – test covariates for heteroskedastic noise

        • +
        +
        +
        +

        This always returns Gaussian predictions, i.e.

        +
        +
        Returns
        +

          +
        • ys - predictive mean

        • +
        • s2 - predictive variance

        • +
        +

        +
        +
        +
        + +
        +
        +predict_and_adjust(hyp, X, y, Xs=None, ys=None, var_groups_test=None, var_groups_adapt=None, **kwargs)[source]
        +

        Function to transfer the model to a new site. This is done by +first making predictions on the adaptation data given by X, +adjusting by the residuals with respect to y.

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates for adaptation (i.e. calibration) data

        • +
        • y – responses for adaptation data

        • +
        • Xs – covariate data (for which predictions should be adjusted)

        • +
        • ys – true response variables (to be adjusted)

        • +
        • var_groups_test – variance groups (e.g. sites) for test data

        • +
        • var_groups_adapt – variance groups for adaptation data

        • +
        +
        +
        +

        There are two possible ways of using this function, depending on +whether ys or Xs is specified

        +

        If ys is specified, this is applied directly to the data, which is +assumed to be in the input space (i.e. not warped). In this case +the adjusted true data points are returned in the same space

        +

        Alternatively, Xs is specified, then the predictions are made and +adjusted. In this case the predictive variance are returned in the +warped (i.e. Gaussian) space.

        +

This function needs to know which sites are associated with which data points, which is provided by var_groups_xxx, a list or array of scalar ids.
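A hedged sketch of site adaptation with predict_and_adjust is shown below, reusing B and hyp from the BLR sketch above; all variable names are illustrative and the return value is assumed to mirror predict().

# Hedged sketch: recalibrate BLR predictions for a new site (names are illustrative)
import numpy as np

rng = np.random.RandomState(1)
X_adapt, y_adapt = rng.randn(30, 3), rng.randn(30)   # adaptation (calibration) data
Xs_new = rng.randn(10, 3)                            # covariates to predict and adjust
site_adapt = np.zeros(30)                            # variance-group (site) ids, adaptation data
site_test = np.zeros(10)                             # variance-group (site) ids, test data

out = B.predict_and_adjust(hyp, X_adapt, y_adapt, Xs=Xs_new,
                           var_groups_test=site_test,
                           var_groups_adapt=site_adapt)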

        @@ -134,10 +366,10 @@

        Navigation

        Bases: object

        Base class for covariance functions.

        All covariance functions must define the following methods:

        -
        CovFunction.get_n_params()
        -CovFunction.cov()
        -CovFunction.xcov()
        -CovFunction.dcov()
        +
        CovFunction.get_n_params()
        +CovFunction.cov()
        +CovFunction.xcov()
        +CovFunction.dcov()
         
        @@ -164,7 +396,7 @@

        Navigation

        class gp.CovLin(x=None)[source]
        -

        Bases: gp.CovBase

        +

        Bases: gp.CovBase

        Linear covariance function (no hyperparameters)

        @@ -190,10 +422,10 @@

        Navigation

        class gp.CovSqExp(x=None)[source]
        -

        Bases: gp.CovBase

        +

        Bases: gp.CovBase

        Ordinary squared exponential covariance function. The hyperparameters are:

        -
        theta = ( log(ell), log(sf) )
        +
        theta = ( log(ell), log(sf) )
         

        where ell is a lengthscale parameter and sf2 is the signal variance

        @@ -221,10 +453,10 @@

        Navigation

        class gp.CovSqExpARD(x=None)[source]
        -

        Bases: gp.CovBase

        +

        Bases: gp.CovBase

        Squared exponential covariance function with ARD The hyperparameters are:

        -
        theta = (log(ell_1, ..., log_ell_D), log(sf))
        +
        theta = (log(ell_1, ..., log_ell_D), log(sf))
         

        where ell_i are lengthscale parameters and sf2 is the signal variance

        @@ -252,15 +484,15 @@

        Navigation

        class gp.CovSum(x=None, covfuncnames=None)[source]
        -

        Bases: gp.CovBase

        +

        Bases: gp.CovBase

Sum of covariance functions. These are passed in as a cell array and initialised automatically. For example:

        -
        C = CovSum(x,(CovLin, CovSqExpARD))
        -C = CovSum.cov(x, )
        +
        C = CovSum(x,(CovLin, CovSqExpARD))
        +C = CovSum.cov(x, )
         

        The hyperparameters are:

        -
        theta = ( log(ell_1, ..., log_ell_D), log(sf2) )
        +
        theta = ( log(ell_1, ..., log_ell_D), log(sf2) )
         

        where ell_i are lengthscale parameters and sf2 is the signal variance

        @@ -292,9 +524,9 @@

        Navigation

        Gaussian process regression

        Estimation and prediction of Gaussian process regression models

        Basic usage:

        -
        G = GPR()
        -hyp = B.estimate(hyp0, cov, X, y)
        -ys, ys2 = B.predict(hyp, cov, X, y, Xs)
        +
        G = GPR()
        +hyp = B.estimate(hyp0, cov, X, y)
        +ys, ys2 = B.predict(hyp, cov, X, y, Xs)
         

        where the variables are

        @@ -318,7 +550,7 @@

        Navigation

        The hyperparameters are:

        -
        hyp = ( log(sn), (cov function params) )  # hyp is a list or array
        +
        hyp = ( log(sn), (cov function params) )  # hyp is a list or array
         

        The implementation and notation follows Rasmussen and Williams (2006). @@ -365,13 +597,14 @@
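A minimal sketch of this usage with a squared-exponential covariance is given below; the import path and the hyperparameter layout (log noise followed by the covariance parameters) are assumptions.

# Minimal sketch of GPR usage with CovSqExp (import path is an assumption)
import numpy as np
from pcntoolkit.model.gp import GPR, CovSqExp

rng = np.random.RandomState(0)
X, y = rng.randn(50, 2), rng.randn(50)
Xs = rng.randn(10, 2)

cov = CovSqExp(X)                        # theta = (log(ell), log(sf))
G = GPR()
hyp0 = np.zeros(3)                       # log(sn) plus the two covariance parameters
hyp = G.estimate(hyp0, cov, X, y)
ys, ys2 = G.predict(hyp, cov, X, y, Xs)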

        Navigation

        normative.estimate(covfile, respfile, **kwargs)[source]

        Estimate a normative model

        -

        This will estimate a model in one of two settings according to theparticular parameters specified (see below)

        +

This will estimate a model in one of two settings according to the particular parameters specified (see below)

        • under k-fold cross-validation. requires respfile, covfile and cvfolds>=2

        • estimating a training dataset then applying to a second test dataset. requires respfile, covfile, testcov and testresp.

        • -
        • estimating on a training dataset ouput of forward maps mean and se. +

• estimating on a training dataset and outputting forward maps of the mean and se; requires respfile, covfile and testcov

        The models are estimated on the basis of data stored on disk in ascii or @@ -380,7 +613,7 @@

        Navigation

        number of variables in columns. Neuroimaging data will be reshaped into the appropriate format

        Basic usage:

        -
        estimate(covfile, respfile, [extra_arguments])
        +
        estimate(covfile, respfile, [extra_arguments])
         

        where the variables are defined below. Note that either the cfolds @@ -398,6 +631,10 @@

        Navigation

      • configparam – Parameters controlling the estimation algorithm

      • saveoutput – Save the output to disk? Otherwise returned as arrays

      • outputsuffix – Text string to add to the output filenames

      • +
      • inscale – Scaling approach for input covariates, could be ‘None’ (Default), +‘standardize’, ‘minmax’, or ‘robminmax’.

      • +
      • outscale – Scaling approach for output responses, could be ‘None’ (Default), +‘standardize’, ‘minmax’, or ‘robminmax’.

      • @@ -422,7 +659,7 @@
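An illustrative call from Python is shown below; the file paths are placeholders, the import path is an assumption, and the keyword names follow the parameter list above.

# Illustrative call to normative.estimate (paths are placeholders)
from pcntoolkit.normative import estimate   # or: import normative, depending on version

estimate('cov_train.txt', 'resp_train.txt',
         testcov='cov_test.txt', testresp='resp_test.txt',
         inscale='standardize', outscale='standardize',
         saveoutput=True, outputsuffix='_demo')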

        Navigation

        -normative.evaluate(Y, Yhat, S2=None, mY=None, sY=None, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL'])[source]
        +normative.evaluate(Y, Yhat, S2=None, mY=None, sY=None, nlZ=None, nm=None, Xz_tr=None, alg=None, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL'])[source]

        Compute error metrics This function will compute error metrics based on a set of predictions Yhat and a set of true response variables Y, namely:

        @@ -433,8 +670,8 @@

        Navigation

      • EXPV: explained variance

      • If the predictive variance is also specified the log loss will be computed -(which also takes into account the predictive variance). If the mean and -standard deviation are also specified these will be used to standardize +(which also takes into account the predictive variance). If the mean and +standard deviation are also specified these will be used to standardize this, yielding the mean standardized log loss

        Parameters
        @@ -482,13 +719,13 @@
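A hedged numeric example is shown below; the shapes follow the conventions above (one column per response variable) and the return value is assumed to be a dictionary keyed by metric name.

# Hedged example of computing error metrics with normative.evaluate
import numpy as np
from pcntoolkit.normative import evaluate

rng = np.random.RandomState(0)
Y = rng.randn(50, 2)                 # true responses
Yhat = Y + 0.1 * rng.randn(50, 2)    # predictions
S2 = 0.01 * np.ones_like(Y)          # predictive variance (enables the log loss)

results = evaluate(Y, Yhat, S2=S2, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV'])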

        Navigation

        -normative.predict(covfile, respfile=None, maskfile=None, **kwargs)[source]
        -

        Make predictions on the basis of a pre-estimated normative model -If only the covariates are specified then only predicted mean and variance +normative.predict(covfile, respfile, maskfile=None, **kwargs)[source] +

Make predictions on the basis of a pre-estimated normative model. If only the covariates are specified then only the predicted mean and variance will be returned. If the test responses are also specified then quantities that depend on those will also be returned (Z scores and error metrics)

        Basic usage:

        -
        predict(covfile, [extra_arguments])
        +
        predict(covfile, [extra_arguments])
         

        where the variables are defined below.

        @@ -499,9 +736,8 @@

        Navigation

      • respfile – test response variables for the normative model

      • maskfile – mask used to apply to the data (nifti only)

      • model_path – Directory containing the normative model and metadata. -When using parallel prediction, do not pass the model path. It will be automatically -decided.

      • -
      • output_path – Directory to store the results

      • +When using parallel prediction, do not pass the model path. It will be +automatically decided.

      • outputsuffix – Text string to add to the output filenames

      • batch_size – batch size (for use with normative_parallel)

      • job_id – batch id

      • @@ -528,11 +764,11 @@
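An illustrative call using a previously estimated model is shown below; the paths are placeholders and the import path is an assumption (outputs are written with the given suffix).

# Illustrative call to normative.predict with a pre-estimated model (paths are placeholders)
from pcntoolkit.normative import predict

predict('cov_test.txt', respfile='resp_test.txt',
        model_path='Models', outputsuffix='_predict')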

        Navigation

        normative.transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None, **kwargs)[source]
        -

        Transfer learning on the basis of a pre-estimated normative model by using -the posterior distribution over the parameters as an informed prior for +

        Transfer learning on the basis of a pre-estimated normative model by using +the posterior distribution over the parameters as an informed prior for new data. currently only supported for HBR.

        Basic usage:

        -
        transfer(covfile, respfile [extra_arguments])
        +
        transfer(covfile, respfile [extra_arguments])
         

        where the variables are defined below.

        @@ -671,7 +907,7 @@
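A hedged sketch is shown below; transfer is currently only supported for HBR as noted above, the paths are placeholders, and model_path is an assumed keyword for locating the pre-estimated model.

# Hedged sketch of normative.transfer (HBR models only; model_path is an assumed keyword)
from pcntoolkit.normative import transfer

transfer('cov_adapt.txt', 'resp_adapt.txt',
         testcov='cov_test.txt', testresp='resp_test.txt',
         model_path='Models', outputsuffix='_transfer')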

        Navigation

      • processing_dir -> Full path to the processing dir

      • python_path -> Full path to the python distribution

      • -
        normative_path -> Full path to the normative.py. If None (default)

        then it will automatically retrieves the path from +

        normative_path -> Full path to the normative.py. If None (default)

then it will automatically retrieve the path from the installed package.

        @@ -903,7 +1139,7 @@

        Navigation

        number of subjects in columns. Neuroimaging data will be reshaped into the appropriate format

        Basic usage:

        -
        estimate(filename, maskfile, basis)
        +
        estimate(filename, maskfile, basis)
         

        where the variables are defined below. Note that either the cfolds @@ -962,9 +1198,9 @@

        Navigation

        Random Feature Approximation for Gaussian Process Regression

        Estimation and prediction of Bayesian linear regression models

        Basic usage:

        -
        R = GPRRFA()
        -hyp = R.estimate(hyp0, X, y)
        -ys,s2 = R.predict(hyp, X, y, Xs)
        +
        R = GPRRFA()
        +hyp = R.estimate(hyp0, X, y)
        +ys,s2 = R.predict(hyp, X, y, Xs)
         

        where the variables are

        @@ -987,13 +1223,13 @@

        Navigation

      • The hyperparameters are:

        -
        hyp = [ log(sn), log(ell), log(sf) ]  # hyp is a numpy array
        +
        hyp = [ log(sn), log(ell), log(sf) ]  # hyp is a numpy array
         
        -

        where sn^2 is the noise variance, ell are lengthscale parameters and +

        where sn^2 is the noise variance, ell are lengthscale parameters and sf^2 is the signal variance. This provides an approximation to the covariance function:

        -
        k(x,z) = x'*z + sn2*exp(0.5*(x-z)'*Lambda*(x-z))
        +
        k(x,z) = x'*z + sn2*exp(0.5*(x-z)'*Lambda*(x-z))
         

        where Lambda = diag((ell_1^2, … ell_D^2))
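A minimal sketch of this usage is given below; the import path is an assumption and hyp0 follows the layout hyp = [log(sn), log(ell), log(sf)] with one lengthscale per input dimension.

# Minimal sketch of GPRRFA usage (import path and hyp0 layout are assumptions)
import numpy as np
from pcntoolkit.model.rfa import GPRRFA

rng = np.random.RandomState(0)
D = 2
X, y = rng.randn(100, D), rng.randn(100)
Xs = rng.randn(20, D)

R = GPRRFA()
hyp0 = np.zeros(1 + D + 1)        # log(sn), lengthscales, log(sf)
hyp = R.estimate(hyp0, X, y)
ys, s2 = R.predict(hyp, X, y, Xs)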

        @@ -1135,677 +1371,54 @@

        Navigation

        fileio.vol2vec(dat, mask, verbose=False)[source]
        -
        -
        -class utils.CustomCV(train, test, X=None, y=None)[source]
        -

        Bases: object

        -

        Custom cross-validation approach. This function does not do much, it -merely provides a wrapper designed to be compatible with -scikit-learn (e.g. sklearn.model_selection…)

        -
        -
        Parameters
        -
          -
        • train – a list of indices of training splits (each itself a list)

        • -
        • test – a list of indices of test splits (each itself a list)

        • -
        -
        -
        Returns tr
        -

        Indices for training set

        -
        -
        Returns te
        -

        Indices for test set

        -
        -
        -
        -
        -split(X, y=None)[source]
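A hedged example of using this wrapper with fixed splits is given below; the import path and the exact format yielded by split() are assumptions.

# Hedged example of CustomCV as a scikit-learn style iterator over fixed splits
import numpy as np
from pcntoolkit.util.utils import CustomCV   # or: from utils import CustomCV

X = np.random.randn(8, 3)
train_splits = [[0, 1, 2, 3], [4, 5, 6, 7]]
test_splits = [[4, 5, 6, 7], [0, 1, 2, 3]]

cv = CustomCV(train_splits, test_splits)
for tr, te in cv.split(X):
    print(len(tr), len(te))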
        -
        - -
        - -
        -
        -utils.FDR(p_values, alpha)[source]
        -

        Compute the false discovery rate in all voxels for a subject.

        -
        - -
        -
        -class utils.WarpAffine[source]
        -

        Bases: utils.WarpBase

        -

        Affine warp -y = a + b*x

        -
        -
        -df(x, params)[source]
        -

        Return the derivative of the warp, dw(x)/dx

        -
        - -
        -
        -f(x, params)[source]
        -

Evaluate the warping function (mapping non-Gaussian response variables to Gaussian variables)

        -
        - -
        -
        -get_n_params()
        -

        Report the number of parameters required

        -
        - -
        -
        -invf(y, params)[source]
        -

        Evaluate the warping function (mapping Gaussian latent variables -to non-Gaussian response variables)

        -
        - -
        -
        -warp_predictions(mu, s2, param, percentiles=[0.025, 0.975])
        -

Compute the warped predictions from a Gaussian predictive distribution, specified by a mean (mu) and variance (s2)

        -
        -
        Parameters
        -
          -
• mu – Gaussian predictive mean

        • -
        • s2 – Predictive variance

        • -
        • param – warping parameters

        • -
        • percentiles – Desired percentiles of the warped likelihood

        • -
        -
        -
        Returns
        -

          -
        • median - median of the predictive distribution

        • -
        • pred_interval - predictive interval(s)

        • -
        -

        -
        -
        -
        - -
        - -
        -
        -class utils.WarpBase[source]
        -

        Bases: object

        -

        Base class for likelihood warping following: -Rios and Torab (2019) Compositionally-warped Gaussian processes -https://www.sciencedirect.com/science/article/pii/S0893608019301856

        -

        All Warps must define the following methods:

        -
        Warp.get_n_params() - return number of parameters
        -Warp.f() - warping function (Non-Gaussian field -> Gaussian)
        -Warp.invf() - inverse warp
        -Warp.df() - derivatives
        -Warp.warp_predictions() - compute predictive distribution
        -
        -
        -
        -
        -abstract df(x, param)[source]
        -

        Return the derivative of the warp, dw(x)/dx

        -
        - -
        -
        -abstract f(x, param)[source]
        -

Evaluate the warping function (mapping non-Gaussian response variables to Gaussian variables)

        -
        - -
        -
        -get_n_params()[source]
        -

        Report the number of parameters required

        -
        - -
        -
        -abstract invf(y, param)[source]
        -

        Evaluate the warping function (mapping Gaussian latent variables -to non-Gaussian response variables)

        -
        - -
        -
        -warp_predictions(mu, s2, param, percentiles=[0.025, 0.975])[source]
        -

Compute the warped predictions from a Gaussian predictive distribution, specified by a mean (mu) and variance (s2)

        -
        -
        Parameters
        -
          -
• mu – Gaussian predictive mean

        • -
        • s2 – Predictive variance

        • -
        • param – warping parameters

        • -
        • percentiles – Desired percentiles of the warped likelihood

        • -
        -
        -
        Returns
        -

          -
        • median - median of the predictive distribution

        • -
        • pred_interval - predictive interval(s)

        • -
        -

        -
        -
        -
        - -
        - -
        -
        -class utils.WarpBoxCox[source]
        -

        Bases: utils.WarpBase

        -

        Box cox transform having a single parameter (lambda), i.e.

        -

y = (sign(x) * abs(x) ** lambda - 1) / lambda

        -

This follows the generalization in Bickel and Doksum (1981) JASA 76 and allows x to assume negative values.
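A hedged sketch of applying the warp and its inverse is given below; the import path and the interpretation of the single parameter are assumptions, so the values are purely illustrative.

# Hedged sketch: forward and inverse Box-Cox warp (parameter value is illustrative)
import numpy as np
from pcntoolkit.util.utils import WarpBoxCox   # or: from utils import WarpBoxCox

warp = WarpBoxCox()
params = np.zeros(warp.get_n_params())   # a single warp parameter
x = np.linspace(-2.0, 2.0, 5)            # non-Gaussian response values
z = warp.f(x, params)                    # map to Gaussian space
x_back = warp.invf(z, params)            # invert the warp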

        -
        -
        -df(x, params)[source]
        -

        Return the derivative of the warp, dw(x)/dx

        -
        - -
        -
        -f(x, params)[source]
        -

Evaluate the warping function (mapping non-Gaussian response variables to Gaussian variables)

        -
        - -
        -
        -get_n_params()
        -

        Report the number of parameters required

        -
        - -
        -
        -invf(y, params)[source]
        -

        Evaluate the warping function (mapping Gaussian latent variables -to non-Gaussian response variables)

        -
        - -
        -
        -warp_predictions(mu, s2, param, percentiles=[0.025, 0.975])
        -

Compute the warped predictions from a Gaussian predictive distribution, specified by a mean (mu) and variance (s2)

        -
        -
        Parameters
        -
          -
• mu – Gaussian predictive mean

        • -
        • s2 – Predictive variance

        • -
        • param – warping parameters

        • -
        • percentiles – Desired percentiles of the warped likelihood

        • -
        -
        -
        Returns
        -

          -
        • median - median of the predictive distribution

        • -
        • pred_interval - predictive interval(s)

        • -
        -

        -
        -
        -
        - -
        - -
        -
        -class utils.WarpCompose(warpnames=None)[source]
        -

        Bases: utils.WarpBase

        -

Composition of warps. These are passed in as an array and initialised automatically. For example:

        -
        W = WarpCompose(('WarpBoxCox', 'WarpAffine'))
        -
        -
        -

        where ell_i are lengthscale parameters and sf2 is the signal variance

        -
        -
        -df(x, theta)[source]
        -

        Return the derivative of the warp, dw(x)/dx

        -
        - -
        -
        -f(x, theta)[source]
        -

Evaluate the warping function (mapping non-Gaussian response variables to Gaussian variables)

        -
        - -
        -
        -get_n_params()
        -

        Report the number of parameters required

        -
        - -
        -
        -invf(x, theta)[source]
        -

        Evaluate the warping function (mapping Gaussian latent variables -to non-Gaussian response variables)

        -
        - -
        -
        -warp_predictions(mu, s2, param, percentiles=[0.025, 0.975])
        -

Compute the warped predictions from a Gaussian predictive distribution, specified by a mean (mu) and variance (s2)

        -
        -
        Parameters
        -
          -
• mu – Gaussian predictive mean

        • -
        • s2 – Predictive variance

        • -
        • param – warping parameters

        • -
        • percentiles – Desired percentiles of the warped likelihood

        • -
        -
        -
        Returns
        -

          -
        • median - median of the predictive distribution

        • -
        • pred_interval - predictive interval(s)

        • -
        -

        -
        -
        -
        - -
        - -
        -
        -class utils.WarpSinArcsinh[source]
        -

        Bases: utils.WarpBase

        -

        Sin-hyperbolic arcsin warp having two parameters (a, b) and defined by

        -

        y = sinh(b * arcsinh(x) - a)

        -

        Using the parametrisation of Rios et al, Neural Networks 118 (2017) -where a controls skew and b controls kurtosis, such that:

        -
        -

        a = 0 : symmetric -a > 0 : positive skew -a < 0 : negative skew -b = 1 : mesokurtic -b > 1 : leptokurtic -b < 1 : platykurtic

        -
        -

where b > 0. However, it is more convenient to use an alternative parameterisation, where

        -

        y = sinh(b * arcsinh(x) + epsilon * b)

        -

        and a = -epsilon*b

        -

see Jones and Pewsey (2009), Biometrika, 96(4)

        -
        -
        -df(x, params)[source]
        -

        Return the derivative of the warp, dw(x)/dx

        -
        - -
        -
        -f(x, params)[source]
        -

Evaluate the warping function (mapping non-Gaussian response variables to Gaussian variables)

        -
        - -
        -
        -get_n_params()
        -

        Report the number of parameters required

        -
        - -
        -
        -invf(y, params)[source]
        -

        Evaluate the warping function (mapping Gaussian latent variables -to non-Gaussian response variables)

        -
        - -
        -
        -warp_predictions(mu, s2, param, percentiles=[0.025, 0.975])
        -

Compute the warped predictions from a Gaussian predictive distribution, specified by a mean (mu) and variance (s2)

        -
        -
        Parameters
        -
          -
• mu – Gaussian predictive mean

        • -
        • s2 – Predictive variance

        • -
        • param – warping parameters

        • -
        • percentiles – Desired percentiles of the warped likelihood

        • -
        -
        -
        Returns
        -

          -
        • median - median of the predictive distribution

        • -
        • pred_interval - predictive interval(s)

        • -
        -

        -
        -
        -
        - -
        - -
        -
        -utils.bashwrap(processing_dir, python_path, script_command, job_name, bash_environment=None)[source]
        -

        This function wraps normative modelling into a bash script to run it -on a torque cluster system.

        -
        -
        Parameters
        -
          -
        • processing_dir – Full path to the processing dir

        • -
        • python_path – Full path to the python distribution

        • -
        • script_command – python command to execute

        • -
        • job_name – Name for the bash script output by this function

        • -
        • covfile_path – Full path to covariates

        • -
        • respfile_path – Full path to response variables

        • -
        • cv_folds – Number of cross validations

        • -
        • testcovfile_path – Full path to test covariates

        • -
        • testrespfile_path – Full path to tes responses

        • -
• bash_environment – A file containing environment-specific commands

        • -
        -
        -
        Returns
        -

        A .sh file containing the commands for normative modelling

        -
        -
        -

Written by Thomas Wolfers

        -
        - -
        -
        -utils.calibration_error(Y, m, s, cal_levels)[source]
        -
        - -
        -
        -utils.compute_MSLL(ytrue, ypred, ypred_var, train_mean=None, train_var=None)[source]
        -

        Computes the MSLL or MLL (not standardized) if ‘train_mean’ and ‘train_var’ are None.

        -

        Basic usage:

        -
        MSLL = compute_MSLL(ytrue, ypred, ytrue_sig, noise_variance, train_mean, train_var)
        -
        -
        -

        where

        -
        -
:param ytrue: n*p matrix of true values where n is the number of samples and p is the number of features.

        -
        -
:param ypred: n*p matrix of predicted values where n is the number of samples and p is the number of features.

        -
        -
:param ypred_var: n*p matrix of summed noise variances and prediction variances where n is the number of samples and p is the number of features.

        -
        -
        -
        -
        Parameters
        -

        train_mean – p dimensional vector of mean values of the training data for each feature.

        -
        -
        -

        :param train_var : p dimensional vector of covariances of the training data for each feature.

        -

        :returns loss : p dimensional vector of MSLL or MLL for each feature.
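A hedged numeric example is given below; the argument order follows the parameter list above and the shapes are illustrative assumptions.

# Hedged example of compute_MSLL (shapes and argument order are assumptions)
import numpy as np
from pcntoolkit.util.utils import compute_MSLL   # or: from utils import compute_MSLL

rng = np.random.RandomState(0)
n, p = 100, 2
ytrue = rng.randn(n, p)
ypred = ytrue + 0.1 * rng.randn(n, p)
ypred_var = 0.01 * np.ones((n, p))       # summed noise and prediction variances
train_mean = ytrue.mean(axis=0)          # p-dimensional vector of training means
train_var = ytrue.var(axis=0)            # p-dimensional vector of training variances

loss = compute_MSLL(ytrue, ypred, ypred_var, train_mean, train_var)   # one value per feature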

        -
        +
        -
        -
        -utils.compute_pearsonr(A, B)[source]
        -

        Manually computes the Pearson correlation between two matrices.

        -

        Basic usage:

        -
        compute_pearsonr(A, B)
        -
        -
        -
        -
        Parameters
        -
          -
        • A – an N * M data array

        • -
        • cov – an N * M array

        • -
        -
        -
        Returns Rho
        -

        N dimensional vector of correlation coefficients

        -
        -
        Returns ys2
        -

        N dimensional vector of p-values

        -
        -
        -

        Notes:

        -
        This function is useful when M is large and only the diagonal entries
        -of the resulting correlation matrix are of interest. This function
        -does not compute the full correlation matrix as an intermediate step
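A hedged example is given below: one correlation coefficient (and p-value) is returned per column, without forming the full correlation matrix.

# Hedged example of compute_pearsonr on two matrices with matching columns
import numpy as np
from pcntoolkit.util.utils import compute_pearsonr   # or: from utils import compute_pearsonr

rng = np.random.RandomState(0)
A = rng.randn(200, 5)
B = A + 0.5 * rng.randn(200, 5)
rho, p = compute_pearsonr(A, B)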
        -
        -
        -
        -
        -
        -utils.create_bspline_basis(xmin, xmax, p=3, nknots=5)[source]
        -

        compute a Bspline basis set where:

        -
        -
        Parameters
        -
          -
        • p – order of spline (3 = cubic)

        • -
        • nknots – number of knots (endpoints only counted once)

        • -
        -
        -
        -
        - -
        -
        -utils.create_poly_basis(X, dimpoly)[source]
        -

        compute a polynomial basis expansion of the specified order
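A hedged sketch of both basis-expansion helpers is given below; the exact return types (a B-spline basis object and a design matrix, respectively) are assumptions.

# Hedged sketch of the basis-expansion helpers (return types are assumptions)
import numpy as np
from pcntoolkit.util.utils import create_bspline_basis, create_poly_basis

B = create_bspline_basis(xmin=18, xmax=90, p=3, nknots=5)      # cubic B-spline basis, e.g. over an age range
Phi = create_poly_basis(np.linspace(18, 90, 10)[:, None], 3)   # polynomial expansion of order 3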

        -
        - -
        -
        -utils.divergence_plot(nm, ylim=None)[source]
        -
        - -
        -
        -utils.explained_var(ytrue, ypred)[source]
        -

        Computes the explained variance of predicted values.

        -

        Basic usage:

        -
        exp_var = explained_var(ytrue, ypred)
        -
        -
        -

        where

        -
        -
        Ytrue
        -

        n*p matrix of true values where n is the number of samples -and p is the number of features.

        -
        -
        Ypred
        -

        n*p matrix of predicted values where n is the number of samples -and p is the number of features.

        -
        -
        Returns exp_var
        -

p dimensional vector of explained variances for each feature.

        -
        -
        -
        - -
        -
        -utils.extreme_value_prob(params, NPM, perc)[source]
        -
        - -
        -
        -utils.extreme_value_prob_fit(NPM, perc)[source]
        -
        - -
        -
        -utils.load_freesurfer_measure(measure, data_path, subjects_list)[source]
        -

This is a utility function to load different Freesurfer measures into a pandas DataFrame.

        -
        -
        Inputs:
          -
        • measure: a string that defines the type of Freesurfer measure we want

        • -
        -
        -
        to load. The options include:
          -
        • ‘NumVert’: Number of Vertices in each cortical area based on Destrieux atlas.

        • -
        • ‘SurfArea: Surface area for each cortical area based on Destrieux atlas.

        • -
• ‘GrayVol’: Gray matter volume in each cortical area based on Destrieux atlas.

        • -
• ‘ThickAvg’: Average Cortical thickness in each cortical area based on Destrieux atlas.

        • -
• ‘ThickStd’: STD of Cortical thickness in each cortical area based on Destrieux atlas.

        • -
        • ‘MeanCurv’: Integrated Rectified Mean Curvature in each cortical area based on Destrieux atlas.

        • -
        • ‘GausCurv’: Integrated Rectified Gaussian Curvature in each cortical area based on Destrieux atlas.

        • -
        • ‘FoldInd’: Folding Index in each cortical area based on Destrieux atlas.

        • -
        • ‘CurvInd’: Intrinsic Curvature Index in each cortical area based on Destrieux atlas.

        • -
        • ‘brain’: Brain Segmentation Statistics from aseg.stats file.

        • -
        • ‘subcortical_volumes’: Subcortical areas volume.

        • -
        -
        -
        -
          -
        • data_path: a string that specifies the path to the main Freesurfer folder.

        • -
• subjects_list: A Python list containing the list of subject names to load the data for.

        • -
        -

        The subject names should match the folder name for each subject’s Freesurfer data folder.

        -
        -
        Outputs:
          -
• df: A pandas dataframe containing the subject names as Index and target Freesurfer measures.

        • -
• missing_subs: A Python list of subject names that are missing the target Freesurfer measures.

        • -
        -
        -
        -
        +
        + +
        +
        + -
        -
        -utils.qsub(job_path, memory, duration, logdir=None)[source]
        -

This function submits a job.sh script to the torque cluster using the qsub command.

        -
        -
        ** Input:
          -
        • job_path -> Full path to the job.sh file

        • -
        • -
          memory -> Memory requirements written as string for example

          4gb or 500mb

          -
          -
          -
        • -
        • -
          duration -> The approximate duration of the job, a string with

          HH:MM:SS for example 01:01:01

          -
          -
          -
        • -
        -
        -
        ** Output:
          -
        • Submission of the job to the (torque) cluster

        • -
        -
        -
        -

Written by Thomas Wolfers

        -
        +
        -
        -
        -utils.ravel_2D(a)[source]
        -
        +
        +

        + © Copyright 2020, Andre F. Marquand. -

        -
        -utils.simulate_data(method='linear', n_samples=100, n_features=1, n_grps=1, working_dir=None, plot=False, random_state=None, noise=None)[source]
        -

        This function simulates linear synthetic data for testing pcntoolkit methods.

        -
        -
        Parameters
        -
          -
        • method – simulate ‘linear’ or ‘non-linear’ function.

        • -
        • n_samples – number of samples in each group of the training and test sets. -If it is an int then the same sample number will be used for all groups. -It can be also a list of size of n_grps that decides the number of samples -in each group (default=100).

        • -
        • n_features – A positive integer that decides the number of features -(default=1).

        • -
        • n_grps – A positive integer that decides the number of groups in data -(default=1).

        • -
        • working_dir – Directory to save data (default=None).

        • -
        • plot – Boolean to plot the simulated training data (default=False).

        • -
        • random_state – random state for generating random numbers (Default=None).

        • -
• noise – Type of added noise to the data. The options are ‘gaussian’, ‘exponential’, and ‘hetero_gaussian’ (the default is None).

        • -
        -
        -
        Returns
        -

        X_train, Y_train, grp_id_train, X_test, Y_test, grp_id_test, coef
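A hedged example using the keywords listed above is shown below; the import path is an assumption.

# Hedged example of simulating grouped data for testing
from pcntoolkit.util.utils import simulate_data   # or: from utils import simulate_data

(X_train, Y_train, grp_id_train,
 X_test, Y_test, grp_id_test, coef) = simulate_data(method='linear', n_samples=100,
                                                    n_features=1, n_grps=2,
                                                    noise='gaussian', random_state=0)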

        -
        -
        -
        +

        +
        + + + + Built with Sphinx using a + + theme + + provided by Read the Docs. -
        -
        -utils.squared_dist(x, z=None)[source]
        -

        compute sum((x-z) ** 2) for all vectors in a 2d array

        -
        +
        +
        +
        -
        -
        -utils.threshold_NPM(NPMs, fdr_thr=0.05, npm_thr=0.1)[source]
        -

        Compute voxels with significant NPMs.

        -
        +
        -
        -
        -utils.unravel_2D(a, s)[source]
        -
        +
        + -
        + + + + + -
        -
        -
        -
        - -
        -
        - - - + \ No newline at end of file diff --git a/doc/build/html/objects.inv b/doc/build/html/objects.inv index 08fc4148..2e6e4475 100644 Binary files a/doc/build/html/objects.inv and b/doc/build/html/objects.inv differ diff --git a/doc/build/html/pages/FAQs.html b/doc/build/html/pages/FAQs.html new file mode 100644 index 00000000..757b8f35 --- /dev/null +++ b/doc/build/html/pages/FAQs.html @@ -0,0 +1,242 @@ + + + + + + + + + + Frequently Asked Questions — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +
        +

        Frequently Asked Questions

        +
        + + +
        + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/_templates/class.html b/doc/build/html/pages/_templates/class.html new file mode 100644 index 00000000..e2a3dcb6 --- /dev/null +++ b/doc/build/html/pages/_templates/class.html @@ -0,0 +1,222 @@ + + + + + + + + + + + + <no title> — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +

        {{ fullname }} +{{ underline }}

        +
        + +
        + +
        +
        + + +
        + +
        +

        + © Copyright 2020, Andre F. Marquand + +

        +
        + Built with Sphinx using a theme provided by Read the Docs. + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/_templates/function.html b/doc/build/html/pages/_templates/function.html new file mode 100644 index 00000000..5abf1b7e --- /dev/null +++ b/doc/build/html/pages/_templates/function.html @@ -0,0 +1,222 @@ + + + + + + + + + + + + <no title> — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +

        {{ fullname }} +{{ underline }}

        +
        + +
        + +
        +
        + + +
        + +
        +

        + © Copyright 2020, Andre F. Marquand + +

        +
        + Built with Sphinx using a theme provided by Read the Docs. + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/acknowledgements.html b/doc/build/html/pages/acknowledgements.html new file mode 100644 index 00000000..270bc998 --- /dev/null +++ b/doc/build/html/pages/acknowledgements.html @@ -0,0 +1,251 @@ + + + + + + + + + + Acknowledgements — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +
        +

        Acknowledgements

        +

        We gratefully acknowledge funding from the Dutch Organisation for Scientific Research (NWO), via a Vernieuwingsimpuls VIDI fellowship, from the UK Wellcome Trust via a Digital Innovator grant and from the UK Medical Research Council via an Experimental Medicine Challenge Grant.

        +

        Core developers of the toolbox are:

        +
          +
        • Andre Marquand

        • +
        • Seyed Mostafa Kia

        • +
        • Thomas Wolfers

        • +
        • Saige Rutherford

        • +
        • Richard Dinga

        • +
        • Mariam Zabihi

        • +
        • Charlotte Fraza

        • +
        +
        + + +
        + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/citing.html b/doc/build/html/pages/citing.html new file mode 100644 index 00000000..9b0ca813 --- /dev/null +++ b/doc/build/html/pages/citing.html @@ -0,0 +1,256 @@ + + + + + + + + + + How to cite PCNtoolkit — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +
        +

        How to cite PCNtoolkit

        +

        If you use the PCNtoolkit, please consider citing some of the following work:

        +

        Marquand, A. F., Wolfers, T., Mennes, M., Buitelaar, J., & Beckmann, C. F. (2016). Beyond Lumping and Splitting: A Review of Computational Approaches for Stratifying Psychiatric Disorders. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 1(5), 433–447. https://doi.org/10.1016/j.bpsc.2016.04.002

        +

        Marquand, A. F., Rezek, I., Buitelaar, J., & Beckmann, C. F. (2016). Understanding Heterogeneity in Clinical Cohorts Using Normative Models: Beyond Case-Control Studies. Biological Psychiatry, 80(7), 552–561. https://doi.org/10.1016/j.biopsych.2015.12.023

        +

        Marquand, A. F., Kia, S. M., Zabihi, M., Wolfers, T., Buitelaar, J. K., & Beckmann, C. F. (2019). Conceptualizing mental disorders as deviations from normative functioning. Molecular Psychiatry, 24(10), 1415–1424. https://doi.org/10.1038/s41380-019-0441-1

        +

        Marquand, A. F., Haak, K. V., & Beckmann, C. F. (2017). Functional corticostriatal connection topographies predict goal directed behaviour in humans. Nature Human Behaviour, 1(8). https://doi.org/10.1038/s41562-017-0146

        +

        Wolfers, T., Beckmann, C. F., Hoogman, M., Buitelaar, J. K., Franke, B., & Marquand, A. F. (2020). Individual differences v. the average patient: Mapping the heterogeneity in ADHD using normative models. Psychological Medicine, 50(2), 314–323. https://doi.org/10.1017/S0033291719000084

        +

        Wolfers, T., Rokicki, J., Alnæs, D., Berthet, P., Agartz, I., Kia, S. M., Kaufmann, T., Zabihi, M., Moberget, T., Melle, I., Beckmann, C. F., Andreassen, O. A., Marquand, A. F., & Westlye, L. T. (n.d.). Replicating extensive brain structural heterogeneity in individuals with schizophrenia and bipolar disorder. Human Brain Mapping, n/a(n/a). https://doi.org/10.1002/hbm.25386

        +

        Zabihi, M., Floris, D. L., Kia, S. M., Wolfers, T., Tillmann, J., Arenas, A. L., Moessnang, C., Banaschewski, T., Holt, R., Baron-Cohen, S., Loth, E., Charman, T., Bourgeron, T., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. (2020). Fractionating autism based on neuroanatomical normative modeling. Translational Psychiatry, 10(1), 1–10. https://doi.org/10.1038/s41398-020-01057-0

        +

        Zabihi, M., Oldehinkel, M., Wolfers, T., Frouin, V., Goyard, D., Loth, E., Charman, T., Tillmann, J., Banaschewski, T., Dumas, G., Holt, R., Baron-Cohen, S., Durston, S., Bölte, S., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. F. (2019). Dissecting the Heterogeneous Cortical Anatomy of Autism Spectrum Disorder Using Normative Models. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 4(6), 567–578. https://doi.org/10.1016/j.bpsc.2018.11.013

        +

        Kia, S. M., & Marquand, A. (2018). Normative Modeling of Neuroimaging Data using Scalable Multi-Task Gaussian Processes. ArXiv:1806.01047 [Cs, Stat]. http://arxiv.org/abs/1806.01047

        +

        Kia, S. M., Beckmann, C. F., & Marquand, A. F. (2018). Scalable Multi-Task Gaussian Process Tensor Regression for Normative Modeling of Structured Variation in Neuroimaging Data. ArXiv:1808.00036 [Cs, Stat]. http://arxiv.org/abs/1808.00036

        +

        Kia, S. M., Huijsdens, H., Dinga, R., Wolfers, T., Mennes, M., Andreassen, O. A., Westlye, L. T., Beckmann, C. F., & Marquand, A. F. (2020). Hierarchical Bayesian Regression for Multi-site Normative Modeling of Neuroimaging Data. In A. L. Martel, P. Abolmaesumi, D. Stoyanov, D. Mateus, M. A. Zuluaga, S. K. Zhou, D. Racoceanu, & L. Joskowicz (Eds.), Medical Image Computing and Computer Assisted Intervention – MICCAI 2020 (pp. 699–709). Springer International Publishing. https://doi.org/10.1007/978-3-030-59728-3_68

        +

        Huertas, I., Oldehinkel, M., van Oort, E. S. B., Garcia-Solis, D., Mir, P., Beckmann, C. F., & Marquand, A. F. (2017). A Bayesian spatial model for neuroimaging data based on biologically informed basis functions. NeuroImage, 161, 134–148. https://doi.org/10.1016/j.neuroimage.2017.08.009

        +

        Fraza, C. J., Dinga, R., Beckmann, C. F., & Marquand, A. F. (2021). Warped Bayesian Linear Regression for Normative Modelling of Big Data. BioRxiv, 2021.04.05.438429. https://doi.org/10.1101/2021.04.05.438429

        +
        + + +
        + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/glossary.html b/doc/build/html/pages/glossary.html new file mode 100644 index 00000000..0096ce0c --- /dev/null +++ b/doc/build/html/pages/glossary.html @@ -0,0 +1,242 @@ + + + + + + + + + + Glossary — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +
        +

        Glossary

        +
        + + +
        + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/installation.html b/doc/build/html/pages/installation.html new file mode 100644 index 00000000..fb7a2d20 --- /dev/null +++ b/doc/build/html/pages/installation.html @@ -0,0 +1,334 @@ + + + + + + + + + + Installation — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +
        +

        Installation

        +
        +

        Basic installation (on a local machine)

        +
          +
        1. Install anaconda3

        2. +
3. Create environment

        4. +
        +
        conda create --name <env_name>
        +
        +
        +
          +
        1. Activate environment

        2. +
        +
        source activate <env_name>
        +
        +
        +
          +
        1. Install required conda packages

        2. +
        +
        conda install pip pandas scipy
        +
        +
        +
          +
        1. Install PCNtoolkit (plus dependencies)

        2. +
        +
        pip install pcntoolkit
        +
        +
        +
        +
        +

        Alternative installation (on a shared resource)

        +
          +
        1. Make sure conda is available on the system. Otherwise install it first from https://www.anaconda.com/

        2. +
        +
        conda --version
        +
        +
        +
          +
        1. Create a conda environment in a shared location

        2. +
        +
        conda create -y python==3.7.7 numpy mkl blas --prefix=/shared/conda/<env_name>
        +
        +
        +
          +
        1. Activate the conda environment

        2. +
        +
        conda activate /shared/conda/<env_name>
        +
        +
        +
          +
        1. Install other dependencies

        2. +
        +
        conda install -y pandas scipy
        +
        +
        +
          +
        1. Install pip dependencies

        2. +
        +
        pip --no-cache-dir install nibabel sklearn torch glob3
        +
        +
        +
          +
        1. Clone the repo

        2. +
        +
        git clone https://github.com/amarquand/PCNtoolkit.git
        +
        +
        +
          +
        1. Install in the conda environment

        2. +
        +
        cd PCNtoolkit/
        +python3 setup.py install
        +
        +
        +
          +
        1. Test

        2. +
        +
        python -c "import pcntoolkit as pk;print(pk.__file__)"
        +
        +
        +
        +
        +

        Quickstart usage

        +

        For normative modelling, functionality is handled by the normative.py script, which can be run from the command line, e.g.

        +
        python normative.py -c /path/to/training/covariates -t /path/to/test/covariates -r /path/to/test/response/variables /path/to/my/training/response/variables
        +
        +
        +
        +
        + + +
        + +
        + +
        +
        + +
        + +
        + + + + + + + + + + + \ No newline at end of file diff --git a/doc/build/html/pages/modindex.html b/doc/build/html/pages/modindex.html new file mode 100644 index 00000000..af96dcf0 --- /dev/null +++ b/doc/build/html/pages/modindex.html @@ -0,0 +1,1418 @@ + + + + + + + + + + Module Index — Predictive Clinical Neuroscience Toolkit 0.20 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        + + + +
        + + + + + +
        + +
        + + + + + + + + + + + + + + + + + + + +
        + + + + +
        +
        +
        +
        + +
        +

        Module Index

        +
        +
        +class bayesreg.BLR(**kwargs)[source]
        +

        Bases: object

        +

        Bayesian linear regression

        +

        Estimation and prediction of Bayesian linear regression models

        +

        Basic usage:

        +
        B = BLR()
        +hyp = B.estimate(hyp0, X, y)
        +ys,s2 = B.predict(hyp, X, y, Xs)
        +
        +
        +

        where the variables are

        +
        +
        Parameters
        +
          +
• hyp – vector of hyperparameters.

        • +
        • X – N x D data array

        • +
        • y – 1D Array of targets (length N)

        • +
        • Xs – Nte x D array of test cases

        • +
        • hyp0 – starting estimates for hyperparameter optimisation

        • +
        +
        +
        Returns
        +

          +
        • ys - predictive mean

        • +
        • s2 - predictive variance

        • +
        +

        +
        +
        +

        The hyperparameters are:

        +
        hyp = ( log(beta), log(alpha) )  # hyp is a list or numpy array
        +
        +
        +

        The implementation and notation mostly follows Bishop (2006). +The hyperparameter beta is the noise precision and alpha is the precision +over lengthscale parameters. This can be either a scalar variable (a +common lengthscale for all input variables), or a vector of length D (a +different lengthscale for each input variable, derived using an automatic +relevance determination formulation). These are estimated using conjugate +gradient optimisation of the marginal likelihood.

        +

        Reference: +Bishop (2006) Pattern Recognition and Machine Learning, Springer

        +

        Written by A. Marquand

        +
        +
        +dloglik(hyp, X, y, Xv=None)[source]
        +

        Function to compute derivatives

        +
        + +
        +
        +estimate(hyp0, X, y, **kwargs)[source]
        +

        Function to estimate the model

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates

        • +
        • y – responses

        • +
        • optimizer – optimisation algorithm (‘cg’,’powell’,’nelder-mead’,’l0bfgs-b’)

        • +
        +
        +
        +
        + +
        +
        +loglik(hyp, X, y, Xv=None)[source]
        +

Function to compute the log (marginal) likelihood

        +
        + +
        +
        +penalized_loglik(hyp, X, y, Xv=None, l=0.1, norm='L1')[source]
        +

        Function to compute the penalized log (marginal) likelihood

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates

        • +
        • y – responses

        • +
        • Xv – covariates for heteroskedastic noise

        • +
        • l – regularisation penalty

        • +
        • norm – type of regulariser (L1 or L2)

        • +
        +
        +
        +
        + +
        +
        +post(hyp, X, y, Xv=None)[source]
        +

        Generic function to compute posterior distribution.

        +

This function will save the posterior mean and precision matrix as self.m and self.A and will also update internal parameters (e.g. N, D and the prior covariance (Sigma_a) and precision (Lambda_a)).

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates

        • +
        • y – responses

        • +
        • Xv – covariates for heteroskedastic noise

        • +
        +
        +
        +
        + +
        +
        +predict(hyp, X, y, Xs, var_groups_test=None, var_covariates_test=None, **kwargs)[source]
        +

        Function to make predictions from the model

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates for training data

        • +
        • y – responses for training data

        • +
        • Xs – covariates for test data

        • +
        • var_covariates_test – test covariates for heteroskedastic noise

        • +
        +
        +
        +

        This always returns Gaussian predictions, i.e.

        +
        +
        Returns
        +

          +
        • ys - predictive mean

        • +
        • s2 - predictive variance

        • +
        +

        +
        +
        +
        + +
        +
        +predict_and_adjust(hyp, X, y, Xs=None, ys=None, var_groups_test=None, var_groups_adapt=None, **kwargs)[source]
        +

        Function to transfer the model to a new site. This is done by +first making predictions on the adaptation data given by X, +adjusting by the residuals with respect to y.

        +
        +
        Parameters
        +
          +
        • hyp – hyperparameter vector

        • +
        • X – covariates for adaptation (i.e. calibration) data

        • +
        • y – responses for adaptation data

        • +
        • Xs – covariate data (for which predictions should be adjusted)

        • +
        • ys – true response variables (to be adjusted)

        • +
        • var_groups_test – variance groups (e.g. sites) for test data

        • +
        • var_groups_adapt – variance groups for adaptation data

        • +
        +
        +
        +

        There are two possible ways of using this function, depending on +whether ys or Xs is specified

        +

        If ys is specified, this is applied directly to the data, which is +assumed to be in the input space (i.e. not warped). In this case +the adjusted true data points are returned in the same space

        +

Alternatively, if Xs is specified, then the predictions are made and adjusted. In this case the predictive variance is returned in the warped (i.e. Gaussian) space.

        +

This function needs to know which sites are associated with which data points. This is provided by the var_groups_xxx arguments, which are lists or arrays of scalar ids.
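For illustration, a hedged sketch of the second mode (making adjusted predictions for new test data), continuing from the fitted BLR model B and hyperparameters hyp above; the adaptation data, test covariates and the assumption that an adjusted predictive mean and variance are returned are illustrative:

X_ad = np.random.randn(30, D)      # adaptation (calibration) covariates from the new site
y_ad = X_ad @ np.array([0.5, -1.0]) + 0.2 * np.random.randn(30)
Xs_te = np.random.randn(10, D)     # test covariates to predict and adjust
ys_te, s2_te = B.predict_and_adjust(hyp, X_ad, y_ad, Xs=Xs_te)
# when several sites are present, pass their ids via var_groups_adapt / var_groups_test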

        +
        + +
        + +
        +
        +class gp.CovBase(x=None)[source]
        +

        Bases: object

        +

        Base class for covariance functions.

        +

        All covariance functions must define the following methods:

        +
        CovFunction.get_n_params()
        +CovFunction.cov()
        +CovFunction.xcov()
        +CovFunction.dcov()
        +
        +
        +
        +
        +abstract cov(theta, x, z=None)[source]
        +

        Return the full covariance (or cross-covariance if z is given)

        +
        + +
        +
        +abstract dcov(theta, x, i)[source]
        +

        Return the derivative of the covariance function with respect to +the i-th hyperparameter

        +
        + +
        +
        +get_n_params()[source]
        +

        Report the number of parameters required

        +
        + +
        + +
        +
        +class gp.CovLin(x=None)[source]
        +

        Bases: gp.CovBase

        +

        Linear covariance function (no hyperparameters)

        +
        +
        +cov(theta, x, z=None)[source]
        +

        Return the full covariance (or cross-covariance if z is given)

        +
        + +
        +
        +dcov(theta, x, i)[source]
        +

        Return the derivative of the covariance function with respect to +the i-th hyperparameter

        +
        + +
        +
        +get_n_params()
        +

        Report the number of parameters required

        +
        + +
        + +
        +
        +class gp.CovSqExp(x=None)[source]
        +

        Bases: gp.CovBase

        +

        Ordinary squared exponential covariance function. +The hyperparameters are:

        +
        theta = ( log(ell), log(sf) )
        +
        +
        +

        where ell is a lengthscale parameter and sf2 is the signal variance

        +
        +
        +cov(theta, x, z=None)[source]
        +

        Return the full covariance (or cross-covariance if z is given)

        +
        + +
        +
        +dcov(theta, x, i)[source]
        +

        Return the derivative of the covariance function with respect to +the i-th hyperparameter

        +
        + +
        +
        +get_n_params()
        +

        Report the number of parameters required

        +
        + +
        + +
        +
        +class gp.CovSqExpARD(x=None)[source]
        +

        Bases: gp.CovBase

        +

        Squared exponential covariance function with ARD +The hyperparameters are:

        +
theta = ( log(ell_1), ..., log(ell_D), log(sf) )
        +
        +
        +

        where ell_i are lengthscale parameters and sf2 is the signal variance

        +
        +
        +cov(theta, x, z=None)[source]
        +

        Return the full covariance (or cross-covariance if z is given)

        +
        + +
        +
        +dcov(theta, x, i)[source]
        +

        Return the derivative of the covariance function with respect to +the i-th hyperparameter

        +
        + +
        +
        +get_n_params()
        +

        Report the number of parameters required

        +
        + +
        + +
        +
        +class gp.CovSum(x=None, covfuncnames=None)[source]
        +

        Bases: gp.CovBase

        +

Sum of covariance functions. These are passed in as a cell array and initialised automatically. For example:

        +
        C = CovSum(x,(CovLin, CovSqExpARD))
        +C = CovSum.cov(x, )
        +
        +
        +

        The hyperparameters are:

        +
theta = ( log(ell_1), ..., log(ell_D), log(sf2) )
        +
        +
        +

        where ell_i are lengthscale parameters and sf2 is the signal variance

        +
        +
        +cov(theta, x, z=None)[source]
        +

        Return the full covariance (or cross-covariance if z is given)

        +
        + +
        +
        +dcov(theta, x, i)[source]
        +

        Return the derivative of the covariance function with respect to +the i-th hyperparameter

        +
        + +
        +
        +get_n_params()
        +

        Report the number of parameters required

        +
        + +
        + +
        +
        +class gp.GPR(hyp=None, covfunc=None, X=None, y=None, n_iter=100, tol=0.001, verbose=False, warp=None)[source]
        +

        Bases: object

        +

        Gaussian process regression

        +

        Estimation and prediction of Gaussian process regression models

        +

        Basic usage:

        +
        G = GPR()
+hyp = G.estimate(hyp0, cov, X, y)
+ys, ys2 = G.predict(hyp, X, y, Xs)
        +
        +
        +

        where the variables are

        +
        +
        Parameters
        +
          +
• hyp – vector of hyperparameters

        • +
        • cov – covariance function

        • +
        • X – N x D data array

        • +
        • y – 1D Array of targets (length N)

        • +
        • Xs – Nte x D array of test cases

        • +
        • hyp0 – starting estimates for hyperparameter optimisation

        • +
        +
        +
        Returns
        +

          +
        • ys - predictive mean

        • +
        • ys2 - predictive variance

        • +
        +

        +
        +
        +

        The hyperparameters are:

        +
        hyp = ( log(sn), (cov function params) )  # hyp is a list or array
        +
        +
        +

        The implementation and notation follows Rasmussen and Williams (2006). +As in the gpml toolbox, these parameters are estimated using conjugate +gradient optimisation of the marginal likelihood. Note that there is no +explicit mean function, thus the gpr routines are limited to modelling +zero-mean processes.

        +

        Reference: +C. Rasmussen and C. Williams (2006) Gaussian Processes for Machine Learning

        +

        Written by A. Marquand
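A minimal sketch of this usage with a squared exponential covariance function; the import path, data and initial hyperparameters are illustrative assumptions, and the covariance function is assumed to be passed as an instantiated object:

import numpy as np
from pcntoolkit.model.gp import GPR, CovSqExp   # assumed layout; some versions expose pcntoolkit.gp

N, D = 100, 2
X = np.random.randn(N, D)
y = np.sin(X[:, 0]) + 0.1 * np.random.randn(N)
Xs = np.random.randn(20, D)

cov = CovSqExp(X)                        # theta = (log(ell), log(sf))
G = GPR()
hyp0 = np.zeros(1 + cov.get_n_params())  # log(sn) followed by the covariance parameters
hyp = G.estimate(hyp0, cov, X, y)
ys, ys2 = G.predict(hyp, X, y, Xs)       # predictive mean and variance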

        +
        +
        +dloglik(hyp, covfunc, X, y)[source]
        +

        Function to compute derivatives

        +
        + +
        +
        +estimate(hyp0, covfunc, X, y, optimizer='cg')[source]
        +

        Function to estimate the model

        +
        + +
        +
        +loglik(hyp, covfunc, X, y)[source]
        +

Function to compute the log (marginal) likelihood

        +
        + +
        +
        +post(hyp, covfunc, X, y)[source]
        +

        Generic function to compute posterior distribution.

        +
        + +
        +
        +predict(hyp, X, y, Xs)[source]
        +

        Function to make predictions from the model

        +
        + +
        + +
        +
        +normative.estimate(covfile, respfile, **kwargs)[source]
        +

        Estimate a normative model

        +

This will estimate a model in one of several settings according to the particular parameters specified (see below):

        +
          +
        • under k-fold cross-validation. +requires respfile, covfile and cvfolds>=2

        • +
        • estimating a training dataset then applying to a second test dataset. +requires respfile, covfile, testcov and testresp.

        • +
• estimating on a training dataset and writing out forward maps (predictive mean and se); requires respfile, covfile and testcov

        • +
        +

        The models are estimated on the basis of data stored on disk in ascii or +neuroimaging data formats (nifti or cifti). Ascii data should be in +tab or space delimited format with the number of subjects in rows and the +number of variables in columns. Neuroimaging data will be reshaped +into the appropriate format

        +

        Basic usage:

        +
        estimate(covfile, respfile, [extra_arguments])
        +
        +
        +

where the variables are defined below. Note that either the cvfolds parameter or (testcov, testresp) should be specified, but not both.

        +
        +
        Parameters
        +
          +
        • respfile – response variables for the normative model

        • +
        • covfile – covariates used to predict the response variable

        • +
        • maskfile – mask used to apply to the data (nifti only)

        • +
        • cvfolds – Number of cross-validation folds

        • +
        • testcov – Test covariates

        • +
        • testresp – Test responses

        • +
        • alg – Algorithm for normative model

        • +
        • configparam – Parameters controlling the estimation algorithm

        • +
        • saveoutput – Save the output to disk? Otherwise returned as arrays

        • +
        • outputsuffix – Text string to add to the output filenames

        • +
        • inscale – Scaling approach for input covariates, could be ‘None’ (Default), +‘standardize’, ‘minmax’, or ‘robminmax’.

        • +
        • outscale – Scaling approach for output responses, could be ‘None’ (Default), +‘standardize’, ‘minmax’, or ‘robminmax’.

        • +
        +
        +
        +

        All outputs are written to disk in the same format as the input. These are:

        +
        +
        Outputs
        +
          +
        • yhat - predictive mean

        • +
        • ys2 - predictive variance

        • +
        • nm - normative model

        • +
        • Z - deviance scores

        • +
        • Rho - Pearson correlation between true and predicted responses

        • +
        • pRho - parametric p-value for this correlation

        • +
        • rmse - root mean squared error between true/predicted responses

        • +
        • smse - standardised mean squared error

        • +
        +
        +
        +

        The outputsuffix may be useful to estimate multiple normative models in the +same directory (e.g. for custom cross-validation schemes)
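For example, a short sketch of calling this from Python under 5-fold cross-validation; the file names are placeholders:

from pcntoolkit.normative import estimate

# 5-fold cross-validation over the subjects in cov.txt / resp.txt,
# using Bayesian linear regression and saving the fitted models
estimate('cov.txt', 'resp.txt', cvfolds=5, alg='blr', savemodel=True)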

        +
        + +
        +
        +normative.evaluate(Y, Yhat, S2=None, mY=None, sY=None, nlZ=None, nm=None, Xz_tr=None, alg=None, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL'])[source]
        +

Compute error metrics. This function will compute error metrics based on a set of predictions Yhat and a set of true response variables Y, namely:

        +
          +
        • Rho: Pearson correlation

        • +
        • RMSE: root mean squared error

        • +
        • SMSE: standardized mean squared error

        • +
        • EXPV: explained variance

        • +
        +

If the predictive variance is also specified, the log loss will be computed (which also takes the predictive variance into account). If the mean and standard deviation of the training data are also specified, these will be used to standardize this, yielding the mean standardized log loss (MSLL).

        +
        +
        Parameters
        +
          +
        • Y – N x P array of true response variables

        • +
        • Yhat – N x P array of predicted response variables

        • +
        • S2 – predictive variance

        • +
        • mY – mean of the training set

        • +
        • sY – standard deviation of the training set

        • +
        +
        +
        Returns metrics
        +

        evaluation metrics
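A small self-contained sketch of computing these metrics directly from arrays; the assumption that a dictionary keyed by metric name is returned is for illustration:

import numpy as np
from pcntoolkit.normative import evaluate

N, P = 50, 3
Y = np.random.randn(N, P)                # true responses
Yhat = Y + 0.1 * np.random.randn(N, P)   # predictions
S2 = 0.01 * np.ones((N, P))              # predictive variance
metrics = evaluate(Y, Yhat, S2=S2, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV'])
print(metrics)                           # assumed to be a dict keyed by metric name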

        +
        +
        +
        + +
        +
        +normative.extend(covfile, respfile, maskfile=None, **kwargs)[source]
        +
        + +
        +
        +normative.fit(covfile, respfile, **kwargs)[source]
        +
        + +
        +
        +normative.get_args(*args)[source]
        +

        Parse command line arguments

        +
        + +
        +
        +normative.load_response_vars(datafile, maskfile=None, vol=True)[source]
        +

        load response variables (of any data type)

        +
        + +
        +
        +normative.main(*args)[source]
        +

        Parse arguments and estimate model

        +
        + +
        +
        +normative.predict(covfile, respfile, maskfile=None, **kwargs)[source]
        +

Make predictions on the basis of a pre-estimated normative model. If only the covariates are specified then only the predicted mean and variance will be returned. If the test responses are also specified then quantities that depend on them will also be returned (Z scores and error metrics).

        +

        Basic usage:

        +
        predict(covfile, [extra_arguments])
        +
        +
        +

        where the variables are defined below.

        +
        +
        Parameters
        +
          +
        • covfile – test covariates used to predict the response variable

        • +
        • respfile – test response variables for the normative model

        • +
        • maskfile – mask used to apply to the data (nifti only)

        • +
        • model_path – Directory containing the normative model and metadata. +When using parallel prediction, do not pass the model path. It will be +automatically decided.

        • +
        • outputsuffix – Text string to add to the output filenames

        • +
        • batch_size – batch size (for use with normative_parallel)

        • +
        • job_id – batch id

        • +
        +
        +
        +

        All outputs are written to disk in the same format as the input. These are:

        +
        +
        Outputs
        +
          +
        • Yhat - predictive mean

        • +
        • S2 - predictive variance

        • +
        • Z - Z scores

        • +
        +
        +
        +
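For example, a hedged sketch of applying a previously saved model to new covariates only; the file and directory names are placeholders and the (Yhat, S2) return value follows the scripted usage shown elsewhere in this documentation:

from pcntoolkit.normative import predict

# predict from a saved model; with no test responses, only Yhat and S2 are produced
yhat, s2 = predict('cov_test.txt', respfile=None, model_path='Models')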
        + +
        +
        +normative.save_results(respfile, Yhat, S2, maskvol, Z=None, outputsuffix=None, results=None, save_path='')[source]
        +
        + +
        +
        +normative.transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None, **kwargs)[source]
        +

Transfer learning on the basis of a pre-estimated normative model, using the posterior distribution over the parameters as an informed prior for new data. This is currently only supported for HBR.

        +

        Basic usage:

        +
        transfer(covfile, respfile [extra_arguments])
        +
        +
        +

        where the variables are defined below.

        +
        +
        Parameters
        +
          +
        • covfile – test covariates used to predict the response variable

        • +
        • respfile – test response variables for the normative model

        • +
        • maskfile – mask used to apply to the data (nifti only)

        • +
        • testcov – Test covariates

        • +
        • testresp – Test responses

        • +
        • model_path – Directory containing the normative model and metadata

        • +
        • trbefile – Training batch effects file

        • +
        • batch_size – batch size (for use with normative_parallel)

        • +
        • job_id – batch id

        • +
        +
        +
        +

        All outputs are written to disk in the same format as the input. These are:

        +
        +
        Outputs
        +
          +
        • Yhat - predictive mean

        • +
        • S2 - predictive variance

        • +
        • Z - Z scores

        • +
        +
        +
        +
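A hedged sketch of a transfer call for an HBR model, using only the keyword arguments listed above; all file and directory names are placeholders and the batch effects file format depends on your setup:

from pcntoolkit.normative import transfer

# adapt a pre-estimated HBR model to data from a new site, then evaluate on test data
transfer('cov_adapt.txt', 'resp_adapt.txt',
         testcov='cov_test.txt', testresp='resp_test.txt',
         model_path='Models', trbefile='batch_effects_adapt.pkl')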
        + +
        +
        +normative_parallel.bashwrap_nm(processing_dir, python_path, normative_path, job_name, covfile_path, respfile_path, func='estimate', **kwargs)[source]
        +

        This function wraps normative modelling into a bash script to run it +on a torque cluster system.

        +
        +
        Parameters
        +
          +
        • processing_dir -> Full path to the processing dir

        • +
        • python_path -> Full path to the python distribution

        • +
        • normative_path -> Full path to the normative.py

        • +
        • +
          job_name -> Name for the bash script that is the output of

          this function

          +
          +
          +
        • +
        • +
          covfile_path -> Full path to a .txt file that contains all

covariates (subjects x covariates) for the responsefile

          +
          +
          +
        • +
        • +
          respfile_path -> Full path to a .txt that contains all features

          (subjects x features)

          +
          +
          +
        • +
        • cv_folds -> Number of cross validations

        • +
        • +
          testcovfile_path -> Full path to a .txt file that contains all

covariates (subjects x covariates) for the testresponse file

          +
          +
          +
        • +
        • +
          testrespfile_path -> Full path to a .txt file that contains all

          test features

          +
          +
          +
        • +
        • alg -> which algorithm to use

        • +
        • configparam -> configuration parameters for this algorithm

        • +
        +
        +
        Outputs
        +
          +
        • A bash.sh file containing the commands for normative modelling saved +to the processing directory (written to disk)

        • +
        +
        +
        +

        written by (primarily) T Wolfers

        +
        + +
        +
        +normative_parallel.collect_nm(processing_dir, job_name, func='estimate', collect=False, binary=False, batch_size=None, outputsuffix='_estimate')[source]
        +

        This function checks and collects all batches.

        +
        +
        Parameters
        +
          +
        • processing_dir -> Full path to the processing directory

        • +
        • +
          collect -> If True data is checked for failed batches

          and collected; if False data is just checked

          +
          +
          +
        • +
        • binary -> Results in pkl format?

        • +
        +
        +
Outputs
        +
          +
• Text files containing all results across all batches, i.e. the combined output (written to disk)

        • +
        • returns 0 if batches fail, 1 otherwise

        • +
        +
        +
        +

        written by (primarily) T Wolfers, (adapted) SM Kia

        +
        + +
        +
        +normative_parallel.delete_nm(processing_dir, binary=False)[source]
        +

        This function deletes all processing for normative modelling and just +keeps the combined output.

        +
        +
        Parameters
        +
          +
        • processing_dir -> Full path to the processing directory

        • +
        • binary -> Results in pkl format?

        • +
        +
        +
        +

        written by (primarily) T Wolfers, (adapted) SM Kia

        +
        + +
        +
        +normative_parallel.execute_nm(processing_dir, python_path, job_name, covfile_path, respfile_path, batch_size, memory, duration, normative_path=None, func='estimate', **kwargs)[source]
        +

This function is a mother function that executes all parallel normative modelling routines. Different specifications are possible using the sub-functions.

        +
        +
        Parameters
        +
          +
        • processing_dir -> Full path to the processing dir

        • +
        • python_path -> Full path to the python distribution

        • +
        • +
          normative_path -> Full path to the normative.py. If None (default)

then it will automatically retrieve the path from the installed package.

          +
          +
          +
        • +
        • +
          job_name -> Name for the bash script that is the output of

          this function

          +
          +
          +
        • +
        • +
          covfile_path -> Full path to a .txt file that contains all

covariates (subjects x covariates) for the responsefile

          +
          +
          +
        • +
        • +
          respfile_path -> Full path to a .txt that contains all features

          (subjects x features)

          +
          +
          +
        • +
        • batch_size -> Number of features in each batch

        • +
        • +
          memory -> Memory requirements written as string

          for example 4gb or 500mb

          +
          +
          +
        • +
        • +
duration -> The approximate duration of the job, a string

          with HH:MM:SS for example 01:01:01

          +
          +
          +
        • +
        • cv_folds -> Number of cross validations

        • +
        • +
          testcovfile_path -> Full path to a .txt file that contains all

covariates (subjects x covariates) for the testresponse file

          +
          +
          +
        • +
        • +
          testrespfile_path -> Full path to a .txt file that contains all

          test features

          +
          +
          +
        • +
• log_path -> Path for saving log files

        • +
        • +
          binary -> If True uses binary format for response file

          otherwise it is text

          +
          +
          +
        • +
        +
        +
        +

        written by (primarily) T Wolfers, (adapted) SM Kia

        +
        + +
        +
        +normative_parallel.qsub_nm(job_path, log_path, memory, duration)[source]
        +

This function submits a job.sh script to the torque cluster using the qsub command.

        +
        +
        ** Input:
          +
        • job_path -> Full path to the job.sh file

        • +
        • +
          memory -> Memory requirements written as string for example

          4gb or 500mb

          +
          +
          +
        • +
        • +
duration -> The approximate duration of the job, a string with

          HH:MM:SS for example 01:01:01

          +
          +
          +
        • +
        +
        +
        ** Output:
          +
        • Submission of the job to the (torque) cluster

        • +
        +
        +
        +

written by (primarily) T Wolfers, (adapted) SM Kia

        +
        + +
        +
        +normative_parallel.rerun_nm(processing_dir, log_path, memory, duration, binary=False)[source]
        +

This function reruns all failed batches in processing_dir after collect_nm has identified the failed batches

        +
          +
        • +
          Input:
            +
          • processing_dir -> Full path to the processing directory

          • +
          • +
            memory -> Memory requirements written as string

            for example 4gb or 500mb

            +
            +
            +
          • +
          • +
            duration -> The approximate duration of the job, a

            string with HH:MM:SS for example 01:01:01

            +
            +
            +
          • +
          +
          +
          +
        • +
        +

        written by (primarily) T Wolfers, (adapted) SM Kia

        +
        + +
        +
        +normative_parallel.sbatch_nm(job_path, log_path)[source]
        +

This function submits a job.sh script to a slurm cluster using the sbatch command.

        +
        +
        ** Input:
          +
        • job_path -> Full path to the job.sh file

        • +
        • log_path -> The logs are currently stored in the working dir

        • +
        +
        +
        ** Output:
          +
        • Submission of the job to the (torque) cluster

        • +
        +
        +
        +

written by (primarily) T Wolfers

        +
        + +
        +
        +normative_parallel.sbatchwrap_nm(processing_dir, python_path, normative_path, job_name, covfile_path, respfile_path, memory, duration, func='estimate', **kwargs)[source]
        +

This function wraps normative modelling into a bash script to run it on a slurm cluster system.

        +
        +
        Parameters
        +
          +
        • processing_dir -> Full path to the processing dir

        • +
        • python_path -> Full path to the python distribution

        • +
        • normative_path -> Full path to the normative.py

        • +
        • +
          job_name -> Name for the bash script that is the output of

          this function

          +
          +
          +
        • +
        • +
          covfile_path -> Full path to a .txt file that contains all

          covariats (subjects x covariates) for the +responsefile

          +
          +
          +
        • +
        • +
          respfile_path -> Full path to a .txt that contains all features

          (subjects x features)

          +
          +
          +
        • +
        • cv_folds -> Number of cross validations

        • +
        • +
          testcovfile_path -> Full path to a .txt file that contains all

covariates (subjects x covariates) for the testresponse file

          +
          +
          +
        • +
        • +
          testrespfile_path -> Full path to a .txt file that contains all

          test features

          +
          +
          +
        • +
        • alg -> which algorithm to use

        • +
        • configparam -> configuration parameters for this algorithm

        • +
        +
        +
        Outputs
        +
          +
        • A bash.sh file containing the commands for normative modelling saved +to the processing directory (written to disk)

        • +
        +
        +
        +

        written by (primarily) T Wolfers

        +
        + +
        +
        +normative_parallel.split_nm(processing_dir, respfile_path, batch_size, binary, **kwargs)[source]
        +

        This function prepares the input files for normative_parallel.

        +
        +
        Parameters
        +
          +
        • processing_dir -> Full path to the folder of processing

        • +
        • +
          respfile_path -> Full path to the responsefile.txt

          (subjects x features)

          +
          +
          +
        • +
        • batch_size -> Number of features in each batch

        • +
        • +
          testrespfile_path -> Full path to the test responsefile.txt

          (subjects x features)

          +
          +
          +
        • +
        • binary -> If True binary file

        • +
        +
        +
        Outputs
        +
          +
• The creation of a folder structure for batch-wise processing

        • +
        +
        +
        +

written by (primarily) T Wolfers, (adapted) SM Kia

        +
        + +
        +
        +trendsurf.create_basis(X, basis, mask)[source]
        +

        Create a (polynomial) basis set

        +
        + +
        +
        +trendsurf.estimate(filename, maskfile, basis, ard=False, outputall=False, saveoutput=True)[source]
        +

        Estimate a trend surface model

        +

        This will estimate a trend surface model, independently for each subject. +This is currently fit using a polynomial model of a specified degree. +The models are estimated on the basis of data stored on disk in ascii or +neuroimaging data formats (currently nifti only). Ascii data should be in +tab or space delimited format with the number of voxels in rows and the +number of subjects in columns. Neuroimaging data will be reshaped +into the appropriate format

        +

        Basic usage:

        +
        estimate(filename, maskfile, basis)
        +
        +
        +

where the variables are defined below.

        +
        +
        Parameters
        +
          +
        • filename – 4-d nifti file containing the images to be estimated

        • +
        • maskfile – nifti mask used to apply to the data

        • +
        • basis – model order for the interpolating polynomial

        • +
        +
        +
        +

        All outputs are written to disk in the same format as the input. These are:

        +
        +
        Outputs
        +
          +
        • yhat - predictive mean

        • +
        • ys2 - predictive variance

        • +
        • trendcoeff - coefficients from the trend surface model

        • +
        • negloglik - Negative log marginal likelihood

        • +
        • hyp - hyperparameters

        • +
        • explainedvar - explained variance

        • +
• rmse - root mean squared error

        • +
        +
        +
        +
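For example, a minimal hedged call; the file names are placeholders and the import path is an assumption:

from pcntoolkit import trendsurf   # assumed layout

# fit a 3rd-order polynomial trend surface to each subject in a 4D nifti
trendsurf.estimate('subjects_4d.nii.gz', 'mask.nii.gz', basis=3)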
        + +
        +
        +trendsurf.get_args(*args)[source]
        +
        + +
        +
        +trendsurf.load_data(datafile, maskfile=None)[source]
        +

        load 4d nifti data

        +
        + +
        +
        +trendsurf.main(*args)[source]
        +
        + +
        +
        +trendsurf.write_nii(data, filename, examplenii, mask)[source]
        +

        Write output to nifti

        +
        + +
        +
        +class rfa.GPRRFA(hyp=None, X=None, y=None, n_feat=None, n_iter=100, tol=0.001, verbose=False)[source]
        +

        Bases: object

        +

        Random Feature Approximation for Gaussian Process Regression

        +

        Estimation and prediction of Bayesian linear regression models

        +

        Basic usage:

        +
        R = GPRRFA()
        +hyp = R.estimate(hyp0, X, y)
        +ys,s2 = R.predict(hyp, X, y, Xs)
        +
        +
        +

        where the variables are

        +
        +
        Parameters
        +
          +
• hyp – vector of hyperparameters.

        • +
        • X – N x D data array

        • +
        • y – 1D Array of targets (length N)

        • +
        • Xs – Nte x D array of test cases

        • +
        • hyp0 – starting estimates for hyperparameter optimisation

        • +
        +
        +
        Returns
        +

          +
        • ys - predictive mean

        • +
        • s2 - predictive variance

        • +
        +

        +
        +
        +

        The hyperparameters are:

        +
        hyp = [ log(sn), log(ell), log(sf) ]  # hyp is a numpy array
        +
        +
        +

        where sn^2 is the noise variance, ell are lengthscale parameters and +sf^2 is the signal variance. This provides an approximation to the +covariance function:

        +
k(x,z) = x'*z + sf2*exp(-0.5*(x-z)'*inv(Lambda)*(x-z))
        +
        +
        +

        where Lambda = diag((ell_1^2, … ell_D^2))

        +

        Written by A. Marquand
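A minimal sketch mirroring the BLR/GPR usage above; the import path, data, number of random features and the use of get_n_params to size the hyperparameter vector are illustrative assumptions:

import numpy as np
from pcntoolkit.model.rfa import GPRRFA   # assumed layout; some versions expose pcntoolkit.rfa

N, D = 200, 3
X = np.random.randn(N, D)
y = np.sin(X[:, 0]) + 0.1 * np.random.randn(N)
Xs = np.random.randn(20, D)

R = GPRRFA(n_feat=100)              # number of random features
hyp0 = np.zeros(R.get_n_params(X))  # assumed to give the length of [log(sn), log(ell), log(sf)]
hyp = R.estimate(hyp0, X, y)
ys, s2 = R.predict(hyp, X, y, Xs)   # predictive mean and variance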

        +
        +
        +dloglik(hyp, X, y)[source]
        +

        Function to compute derivatives

        +
        + +
        +
        +estimate(hyp0, X, y, optimizer='lbfgs')[source]
        +

        Function to estimate the model

        +
        + +
        +
        +get_n_params(X)[source]
        +
        + +
        +
        +loglik(hyp, X, y)[source]
        +

        Function to compute compute log (marginal) likelihood

        +
        + +
        +
        +post(hyp, X, y)[source]
        +

        Generic function to compute posterior distribution.

        +

This function will save the posterior mean and precision matrix as self.m and self.A and will also update internal parameters (e.g. N, D and the prior covariance (Sigma) and precision (iSigma)).

        +
        + +
        +
        +predict(hyp, X, y, Xs)[source]
        +

        Function to make predictions from the model

        +
        + +
        + +
        +
        +fileio.alphanum_key(s)[source]
        +
        + +
        +
        +fileio.create_mask(data_array, mask, verbose=False)[source]
        +
        + +
        +
        +fileio.file_extension(filename)[source]
        +
        + +
        +
        +fileio.file_stem(filename)[source]
        +
        + +
        +
        +fileio.file_type(filename)[source]
        +
        + +
        +
        +fileio.load(filename, mask=None, text=False, vol=True)[source]
        +
        + +
        +
        +fileio.load_ascii(filename)[source]
        +
        + +
        +
        +fileio.load_cifti(filename, vol=False, mask=None, rmtmp=True)[source]
        +
        + +
        +
        +fileio.load_nifti(datafile, mask=None, vol=False, verbose=False)[source]
        +
        + +
        +
        +fileio.load_pd(filename)[source]
        +
        + +
        +
        +fileio.predictive_interval(s2_forward, cov_forward, multiplicator)[source]
        +
        + +
        +
        +fileio.save(data, filename, example=None, mask=None, text=False)[source]
        +
        + +
        +
        +fileio.save_ascii(data, filename)[source]
        +
        + +
        +
        +fileio.save_cifti(data, filename, example, mask=None, vol=True, volatlas=None)[source]
        +

Write output to cifti

        +
        + +
        +
        +fileio.save_nifti(data, filename, examplenii, mask)[source]
        +

        Write output to nifti

        +
        + +
        +
        +fileio.save_pd(data, filename)[source]
        +
        + +
        +
        +fileio.sort_nicely(l)[source]
        +
        + +
        +
        +fileio.tryint(s)[source]
        +
        + +
        +
        +fileio.vol2vec(dat, mask, verbose=False)[source]
        +
        + +
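The load and save helpers provide a common interface across the supported data formats. A hedged illustration, with placeholder file names; the import path and the exact keyword behaviour (e.g. whether mask accepts a filename) should be checked against your installed version:

from pcntoolkit.dataio import fileio   # assumed layout; some versions expose pcntoolkit.fileio

# load masked nifti responses into a subjects x voxels array
Y = fileio.load('resp.nii.gz', mask='mask.nii.gz')

# compute some per-voxel statistic, e.g. a crude z-score
Z = (Y - Y.mean(axis=0)) / Y.std(axis=0)

# write the result back out in the same format, using the input image as an example volume
fileio.save(Z, 'Z.nii.gz', example='resp.nii.gz', mask='mask.nii.gz')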
\ No newline at end of file
diff --git a/doc/build/html/pages/pcntoolkit_background.html b/doc/build/html/pages/pcntoolkit_background.html
new file mode 100644
index 00000000..92efcad8
--- /dev/null
+++ b/doc/build/html/pages/pcntoolkit_background.html
@@ -0,0 +1,461 @@
PCNtoolkit Background — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        PCNtoolkit Background

        +
        +

        What is the PCNtoolkit?

        +

Predictive Clinical Neuroscience (PCN) toolkit (formerly nispat) is a Python package designed for multi-purpose tasks in clinical neuroimaging, including normative modelling and trend surface modelling, in addition to providing implementations of a number of fundamental machine learning algorithms.

        +
        +
        +
        +

        Intro to normative modelling

        +

Normative modelling essentially aims to predict centiles of variance in a response variable (e.g. a region of interest or other neuroimaging-derived measure) on the basis of a set of covariates (e.g. age, clinical scores, diagnosis). A conceptual overview of the approach can be found in this publication. For example, the image below shows an example of a normative model that aims to predict vertex-wise cortical thickness data, essentially fitting a separate model for each vertex.

        +
        +../_images/nm_concept.png +
        +

        In practice, this is done by regressing the biological response variables against a set of clinical or demographic covariates. In the instructions that follow, it is helpful to think of these as being stored in matrices as shown below:

        +
        +../_images/nm_overview.png +
        +

        There are many options for this, but techniques that provide a distributional form for the centiles are appealing, since they help to estimate extreme centiles more efficiently. Bayesian methods are also beneficial in this regard because they also allow separation of modelling uncertainty from variation in the data. Many applications of normative modelling use Gaussian Process Regression, which is the default method in this toolkit. Typically (but not always), each response variable is estimated independently.

        +
        +

        Data formats

        +

        Generally the covariates are specified in text format, roughly following the FSL convention in that the text file should contain one entry +(i.e. subject) per line, with columns space or tab separated and no headers. For example:

        +
        head cov.txt
        +52 55 94 4.6
        +49 43 59 4.6
        +56 80 63 5.6
        +39 48 42 4.3
        +
        +
        +
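If your covariates are already held in a numpy array, a file in exactly this format can be written with np.savetxt, for example:

import numpy as np

cov = np.array([[52, 55, 94, 4.6],
                [49, 43, 59, 4.6],
                [56, 80, 63, 5.6],
                [39, 48, 42, 4.3]])   # one subject per row
np.savetxt('cov.txt', cov, fmt='%g')  # space separated, no header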

        For the response variables, the following data formats are supported:

        +
          +
        • NIfTI (e.g. .nii.gz or .img/.hdr)

        • +
        • CIFTI (e.g. .dtseries.nii)

        • +
        • Pickle/pandas (e.g. .pkl)

        • +
        • ASCII text (e.g. .txt, .csv, .tsv)

        • +
        +

        For nifti/cifti formats, data should be in timeseries format with subjects along the time dimension and these images will be masked and reshaped into vectors. If no mask is specified, one will be created automatically from the image data.

        +
        +
        +

        Basic usage (command line)

        +

        The simplest method to estimate a normative model is using the normative.py script which can be run from the command line or imported as a python module. For example, the following command will estimate a normative model on the basis of the matrix of covariates and responses specified in cov.txt and resp.txt respectively. These are simply tab or space separated ASCII text files that contain the variables of interest, with one subject per row.

        +
        python normative.py -c cov.txt -k 5 -a blr resp.txt
        +
        +
        +

The argument -a blr tells the script to use Bayesian linear regression rather than the default Gaussian process regression model and -k 5 tells the script to run internal 5-fold cross-validation across all subjects in the covariates and responses files. Alternatively, the model can be evaluated on a separate dataset by specifying test covariates (and optionally also test responses). The following estimation algorithms are supported:

        +

        Table 1: Estimation algorithms

Key           | Description                      | Reference
gpr (default) | Gaussian Process Regression      | Marquand et al 2016
hbr           | Hierarchical Bayesian Regression | Kia et al 2020
blr           | Bayesian Linear Regression       | Huertas et al 2017
np            | Neural Processes                 | Kia et al 2018
rfa           | Random Feature Approximation     | Rahimi and Recht 2007

        +

        Note that keyword arguments can also be specified from the command line to offer additional flexibility. For example, the following command will fit a normative model to the same data, but without standardizing the data first and additionally writing out model coefficients (this is not done by default because they can use a lot of disk space).

        +
        python normative.py -c cov.txt -k 5 -a blr resp.txt standardize=False savemodel=True
        +
        +
        +

        A full set of keyword arguments is provided in the table below. At a minimum, a set of responses and covariates must be provided and either the corresponding number of cross-validation folds or a set of test covariates.

        +

        Table 2: Keywords and command line arguments

Keyword      | Command line shortcut | Description
covfunc      | -c filename           | Covariate file
cvfolds      | -k num_folds          | Number of cross-validation folds
testcov      | -t filename           | Test covariates
testresp     | -r filename           | Test responses
maskfile     | -m filename           | Mask to apply to the response variables (nifti/cifti only)
alg          | -a algorithm          | Estimation algorithm: ‘gpr’ (default), ‘blr’, ‘np’, ‘hbr’ or ‘rfa’. See table above.
function     | -f function           | Function to call (estimate, predict, transfer, extend). See below.
standardize  | -s (skip)             | Standardize the covariates and response variables using the training data
configparam  | -x config             | Pass the value of config to the estimation algorithm (deprecated)
outputsuffix |                       | Suffix to apply to the output variables
saveoutput   |                       | Write output (default = True)
savemodel    |                       | Save the model coefficients and meta-data (default = False)
warp         |                       | Warping function to apply to the responses (blr only)

        +
        +
        +

        Basic usage (scripted)

        +

        The same can be done by importing the estimate function from normative.py. For example, the following code snippet will: (i) mask the nifti data specified in resp_train.nii.gz using the mask specified (which must have the same voxel size as the response variables) (ii) fit a linear normative model to each voxel, (iii) apply this to make predictions using the test covariates and (iv) compute deviation scores and error metrics by comparing against the true test response variables.

        +
        from pcntoolkit.normative import estimate
        +
        +# estimate a normative model
        +estimate("cov_train.txt", "resp_train.nii.gz", maskfile="mask.nii.gz", \
        +        testresp="resp_test.nii.gz", testcov="cov_test.txt", alg="blr")
        +
        +
        +

The estimate function does all these operations in a single step. In some cases it may be desirable to separate these steps. For example, if a normative model has been estimated on a large dataset, it may be desirable to save the model before applying it to a new dataset (e.g. from a different site). For example, the following code snippet will first fit a model, then apply it to a set of dummy covariates so that the normative model can be plotted:

        +
        from pcntoolkit.normative import estimate, predict
        +
        +# fit a normative model, using training covariates and responses
        +# then apply to test dataset. Saved with file suffix '_estimate'
        +estimate(cov_file_tr, resp_file_tr, testresp=resp_file_te, \
        +        testcov=cov_file_te, alg='blr', optimizer = 'powell', \
        +        savemodel=True, standardize = False)
        +
        +# make predictions on a set of dummy covariates (with no responses)
        +# Saved with file suffix '_predict'
        +yhat, s2 = predict(cov_file_dummy)
        +
        +
        +

For further information, see the developer documentation. The same can be achieved from the command line, using the -f argument, for example, by specifying -f predict.

        +
        +
        +

Parallelising estimation to speed things up

        +

Normative model estimation is typically quite computationally expensive, especially for large datasets. This is exacerbated by high-resolution data (e.g. voxelwise data). For such cases normative model estimation can be parallelised across multiple compute nodes, which can be achieved using the normative_parallel.py script. This involves splitting the response matrix into a set of batches, each of a specified size, i.e.:

        +
        +../_images/nm_parallel.png +
        +

        Each of these are then submitted to a cluster and reassembled once the cluster jobs have been completed. The following code snippet illustrates this procedure:

        +
        from pcntoolkit.normative_parallel import execute_nm, collect_nm, delete_nm
        +
        +# General config parameters
        +normative_path = '/<path-to-my>/pcntoolkit/normative.py'
        +python_path='/<path-to-my>/bin/python'
        +working_dir = '/<where-results-will-be_stored>/'
        +log_dir = '/<where-logs-will-be_stored>/'
        +
+# cluster parameters
        +job_name = 'nm_demo'   # name for the cluster job
        +batch_size = 10        # number of models (e.g. voxels) per batch
        +memory = '4gb'         # memory required
        +duration = '01:00:00'  # walltime
        +cluster = 'torque'
        +
        +# fit the model. Specifying binary=True means results will be stored in .pkl format
+execute_nm(working_dir, python_path, normative_path, job_name, 'cov_file.txt', \
+        'resp_file.pkl', batch_size, memory, duration, cluster_spec=cluster, \
        +        cv_folds=2, log_path=log_dir, binary=True)
        +
        +# wait until jobs complete ...
        +
        +# reassemble results
        +collect_nm(working_dir, job_name, collect=True, binary=True)
        +
        +# remove temporary files
        +delete_nm(working_dir, binary=True)
        +
        +
        +

At the present time, only ASCII and pickle formats are supported by normative_parallel. Note also that it may be necessary to customise the script to support your local cluster architecture. This can be done using fairly obvious modifications to the execute_nm() function.

        +
        +
        + + +
        + +
        + +
        +
        + +
        + +
\ No newline at end of file
diff --git a/doc/build/html/pages/references.html b/doc/build/html/pages/references.html
new file mode 100644
index 00000000..7b1398f6
--- /dev/null
+++ b/doc/build/html/pages/references.html
@@ -0,0 +1,255 @@
List of references — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        References

        +

        Marquand, A. F., Wolfers, T., Mennes, M., Buitelaar, J., & Beckmann, C. F. (2016). Beyond Lumping and Splitting: A Review of Computational Approaches for Stratifying Psychiatric Disorders. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 1(5), 433–447. https://doi.org/10.1016/j.bpsc.2016.04.002

        +

        Marquand, A. F., Rezek, I., Buitelaar, J., & Beckmann, C. F. (2016). Understanding Heterogeneity in Clinical Cohorts Using Normative Models: Beyond Case-Control Studies. Biological Psychiatry, 80(7), 552–561. https://doi.org/10.1016/j.biopsych.2015.12.023

        +

        Marquand, A. F., Kia, S. M., Zabihi, M., Wolfers, T., Buitelaar, J. K., & Beckmann, C. F. (2019). Conceptualizing mental disorders as deviations from normative functioning. Molecular Psychiatry, 24(10), 1415–1424. https://doi.org/10.1038/s41380-019-0441-1

        +

        Marquand, A. F., Haak, K. V., & Beckmann, C. F. (2017). Functional corticostriatal connection topographies predict goal directed behaviour in humans. Nature Human Behaviour, 1(8). https://doi.org/10.1038/s41562-017-0146

        +

        Wolfers, T., Beckmann, C. F., Hoogman, M., Buitelaar, J. K., Franke, B., & Marquand, A. F. (2020). Individual differences v. the average patient: Mapping the heterogeneity in ADHD using normative models. Psychological Medicine, 50(2), 314–323. https://doi.org/10.1017/S0033291719000084

        +

        Wolfers, T., Rokicki, J., Alnæs, D., Berthet, P., Agartz, I., Kia, S. M., Kaufmann, T., Zabihi, M., Moberget, T., Melle, I., Beckmann, C. F., Andreassen, O. A., Marquand, A. F., & Westlye, L. T. (n.d.). Replicating extensive brain structural heterogeneity in individuals with schizophrenia and bipolar disorder. Human Brain Mapping, n/a(n/a). https://doi.org/10.1002/hbm.25386

        +

        Zabihi, M., Floris, D. L., Kia, S. M., Wolfers, T., Tillmann, J., Arenas, A. L., Moessnang, C., Banaschewski, T., Holt, R., Baron-Cohen, S., Loth, E., Charman, T., Bourgeron, T., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. (2020). Fractionating autism based on neuroanatomical normative modeling. Translational Psychiatry, 10(1), 1–10. https://doi.org/10.1038/s41398-020-01057-0

        +

        Zabihi, M., Oldehinkel, M., Wolfers, T., Frouin, V., Goyard, D., Loth, E., Charman, T., Tillmann, J., Banaschewski, T., Dumas, G., Holt, R., Baron-Cohen, S., Durston, S., Bölte, S., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. F. (2019). Dissecting the Heterogeneous Cortical Anatomy of Autism Spectrum Disorder Using Normative Models. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 4(6), 567–578. https://doi.org/10.1016/j.bpsc.2018.11.013

        +

        Kia, S. M., & Marquand, A. (2018). Normative Modeling of Neuroimaging Data using Scalable Multi-Task Gaussian Processes. ArXiv:1806.01047 [Cs, Stat]. http://arxiv.org/abs/1806.01047

        +

        Kia, S. M., Beckmann, C. F., & Marquand, A. F. (2018). Scalable Multi-Task Gaussian Process Tensor Regression for Normative Modeling of Structured Variation in Neuroimaging Data. ArXiv:1808.00036 [Cs, Stat]. http://arxiv.org/abs/1808.00036

        +

        Kia, S. M., Huijsdens, H., Dinga, R., Wolfers, T., Mennes, M., Andreassen, O. A., Westlye, L. T., Beckmann, C. F., & Marquand, A. F. (2020). Hierarchical Bayesian Regression for Multi-site Normative Modeling of Neuroimaging Data. In A. L. Martel, P. Abolmaesumi, D. Stoyanov, D. Mateus, M. A. Zuluaga, S. K. Zhou, D. Racoceanu, & L. Joskowicz (Eds.), Medical Image Computing and Computer Assisted Intervention – MICCAI 2020 (pp. 699–709). Springer International Publishing. https://doi.org/10.1007/978-3-030-59728-3_68

        +

        Huertas, I., Oldehinkel, M., van Oort, E. S. B., Garcia-Solis, D., Mir, P., Beckmann, C. F., & Marquand, A. F. (2017). A Bayesian spatial model for neuroimaging data based on biologically informed basis functions. NeuroImage, 161, 134–148. https://doi.org/10.1016/j.neuroimage.2017.08.009

        +

        Fraza, C. J., Dinga, R., Beckmann, C. F., & Marquand, A. F. (2021). Warped Bayesian Linear Regression for Normative Modelling of Big Data. BioRxiv, 2021.04.05.438429. https://doi.org/10.1101/2021.04.05.438429

        +
        + + +
        + +
        + +
        +
        + +
        + +
\ No newline at end of file
diff --git a/doc/build/html/pages/scripts.html b/doc/build/html/pages/scripts.html
new file mode 100644
index 00000000..5e70be4b
--- /dev/null
+++ b/doc/build/html/pages/scripts.html
@@ -0,0 +1,429 @@
Intro to normative modelling — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Intro to normative modelling

        +

Normative modelling essentially aims to predict centiles of variance in a response variable (e.g. a region of interest or other neuroimaging-derived measure) on the basis of a set of covariates (e.g. age, clinical scores, diagnosis). A conceptual overview of the approach can be found in [this publication](https://www.nature.com/articles/s41380-019-0441-1). For example, the image below shows an example of a normative model that aims to predict vertex-wise cortical thickness data, essentially fitting a separate model for each vertex.

        +
        +../_images/nm_concept.png +
        +

        In practice, this is done by regressing the biological response variables against a set of clinical or demographic covariates. In the instructions that follow, it is helpful to think of these as being stored in matrices as shown below:

        +
        +../_images/nm_overview.png +
        +

        There are many options for this, but techniques that provide a distributional form for the centiles are appealing, since they help to estimate extreme centiles more efficiently. Bayesian methods are also beneficial in this regard because they also allow separation of modelling uncertainty from variation in the data. Many applications of normative modelling use Gaussian Process Regression, which is the default method in this toolkit. Typically (but not [always](https://link.springer.com/chapter/10.1007/978-3-030-00931-1_15)), each response variable is estimated independently.

        +
        +

        Data formats

        +

        Generally the covariates are specified in text format, roughly following the FSL convention in that the text file should contain one entry +(i.e. subject) per line, with columns space or tab separated and no headers. For example:

        +
        head cov.txt
        +52 55 94 4.6
        +49 43 59 4.6
        +56 80 63 5.6
        +39 48 42 4.3
        +
        +
        +

        For the response variables, the following data formats are supported:

        +
          +
        • NIfTI (e.g. .nii.gz or .img/.hdr)

        • +
        • CIFTI (e.g. .dtseries.nii)

        • +
        • Pickle/pandas (e.g. .pkl)

        • +
        • ASCII text (e.g. .txt, .csv, .tsv)

        • +
        +

        For nifti/cifti formats, data should be in timeseries format with subjects along the time dimension and these images will be masked and reshaped into vectors. If no mask is specified, one will be created automatically from the image data.

        +
        +
        +

        Basic usage (command line)

        +

        The simplest method to estimate a normative model is using the `normative.py` script which can be run from the command line or imported as a python module. For example, the following command will estimate a normative model on the basis of the matrix of covariates and responses specified in cov.txt and resp.txt respectively. These are simply tab or space separated ASCII text files that contain the variables of interest, with one subject per row.

        +
        python normative.py -c cov.txt -k 5 -a blr resp.txt
        +
        +
        +

The argument -a blr tells the script to use Bayesian linear regression rather than the default Gaussian process regression model, and -k 5 tells the script to run internal 5-fold cross-validation across all subjects in the covariates and responses files. Alternatively, the model can be evaluated on a separate dataset by specifying test covariates (and optionally also test responses). The following estimation algorithms are supported:

        +

Table 1: Estimation algorithms

.. list-table::
   :widths: 50 50 50
   :header-rows: 1

   * - key value
     - Description
     - Reference
   * - gpr (default)
     - Gaussian Process Regression
     - Marquand et al 2016 https://www.sciencedirect.com/science/article/pii/S0006322316000020
   * - hbr
     - Hierarchical Bayesian Regression
     - Kia et al 2020 https://arxiv.org/abs/2005.12055
   * - blr
     - Bayesian Linear Regression
     - Huertas et al 2017 https://www.sciencedirect.com/science/article/pii/S1053811917306560
   * - np
     - Neural Processes
     - Kia et al 2018 https://arxiv.org/abs/1812.04998
   * - rfa
     - Random Feature Approximation
     - Rahimi and Recht 2007 https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
        +
        +
        +

        Note that keyword arguments can also be specified from the command line to offer additional flexibility. For example, the following command will fit a normative model to the same data, but without standardizing the data first and additionally writing out model coefficients (this is not done by default because they can use a lot of disk space).

        +
        python normative.py -c cov.txt -k 5 -a blr resp.txt standardize=False savemodel=True
        +
        +
        +

        A full set of keyword arguments is provided in the table below. At a minimum, a set of responses and covariates must be provided and either the corresponding number of cross-validation folds or a set of test covariates.

        +

Table 2: Keywords and command line arguments

.. list-table::
   :widths: 50 50 50
   :header-rows: 1

   * - keyword
     - Command line shortcut
     - Description
   * - covfile
     - -c filename
     - Covariate file
   * - cvfolds
     - -k num_folds
     - Number of cross-validation folds
   * - testcov
     - -t filename
     - Test covariates
   * - testresp
     - -r filename
     - Test responses
   * - maskfile
     - -m filename
     - Mask to apply to the response variables (nifti/cifti only)
   * - alg
     - -a algorithm
     - Estimation algorithm: 'gpr' (default), 'blr', 'np', 'hbr' or 'rfa'. See table above.
   * - function
     - -f function
     - Function to call (estimate, predict, transfer, extend). See below.
   * - standardize
     - -s (skip)
     - Standardize the covariates and response variables using the training data
   * - configparam
     - -x config
     - Pass the value of config to the estimation algorithm (deprecated)
   * - outputsuffix
     -
     - Suffix to apply to the output variables
   * - saveoutput
     -
     - Write output (default = True)
   * - savemodel
     -
     - Save the model coefficients and meta-data (default = False)
   * - warp
     -
     - Warping function to apply to the responses (blr only)
        +
        +
        +
        +
        +

        Basic usage (scripted)

        +

        The same can be done by importing the estimate function from normative.py. For example, the following code snippet will: (i) mask the nifti data specified in resp_train.nii.gz using the mask specified (which must have the same voxel size as the response variables) (ii) fit a linear normative model to each voxel, (iii) apply this to make predictions using the test covariates and (iv) compute deviation scores and error metrics by comparing against the true test response variables.

        +
        from pcntoolkit.normative import estimate
        +
        +# estimate a normative model
        +estimate("cov_train.txt", "resp_train.nii.gz", maskfile="mask.nii.gz", \
        +        testresp="resp_test.nii.gz", testcov="cov_test.txt", alg="blr")
        +
        +
        +

The estimate function does all these operations in a single step. In some cases it may be desirable to separate these steps, for example if a normative model has been estimated on a large dataset and should be saved before being applied to a new dataset (e.g. from a different site). The following code snippet will first fit a model, then apply it to a set of dummy covariates so that the normative model can be plotted:

        +
        from pcntoolkit.normative import estimate, predict
        +
        +# fit a normative model, using training covariates and responses
        +# then apply to test dataset. Saved with file suffix '_estimate'
        +estimate(cov_file_tr, resp_file_tr, testresp=resp_file_te, \
        +        testcov=cov_file_te, alg='blr', optimizer = 'powell', \
        +        savemodel=True, standardize = False)
        +
        +# make predictions on a set of dummy covariates (with no responses)
        +# Saved with file suffix '_predict'
        +yhat, s2 = predict(cov_file_dummy)
        +
        +
        +

For further information, see the [developer documentation](https://amarquand.github.io/PCNtoolkit/doc/build/html/modindex.html#module-normative). The same can be achieved from the command line, using the -f argument, for example, by specifying -f predict.

        +
        +
        +

Parallelising estimation to speed things up

        +

Normative model estimation is typically quite computationally expensive, especially for large datasets. This is exacerbated by high-resolution data (e.g. voxelwise data). For such cases normative model estimation can be parallelised across multiple compute nodes, which can be achieved using the normative_parallel.py script. This involves splitting the response matrix into a set of batches, each of a specified size, i.e.:

        +
        +../_images/nm_parallel.png +
        +

Each of these batches is then submitted to a cluster, and the results are reassembled once the cluster jobs have completed. The following code snippet illustrates this procedure:

        +
        from pcntoolkit.normative_parallel import execute_nm, collect_nm, delete_nm
        +
        +# General config parameters
        +normative_path = '/<path-to-my>/pcntoolkit/normative.py'
        +python_path='/<path-to-my>/bin/python'
        +working_dir = '/<where-results-will-be_stored>/'
        +log_dir = '/<where-logs-will-be_stored>/'
        +
+# cluster parameters
        +job_name = 'nm_demo'   # name for the cluster job
        +batch_size = 10        # number of models (e.g. voxels) per batch
        +memory = '4gb'         # memory required
        +duration = '01:00:00'  # walltime
        +cluster = 'torque'
        +
        +# fit the model. Specifying binary=True means results will be stored in .pkl format
+execute_nm(working_dir, python_path, normative_path, job_name, 'cov_file.txt', \
+        'resp_file.pkl', batch_size, memory, duration, cluster_spec=cluster, \
        +        cv_folds=2, log_path=log_dir, binary=True)
        +
        +# wait until jobs complete ...
        +
        +# reassemble results
        +collect_nm(working_dir, job_name, collect=True, binary=True)
        +
        +# remove temporary files
        +delete_nm(working_dir, binary=True)
        +
        +
        +

At the present time, only ASCII and pickle formats are supported by normative_parallel. Note also that it may be necessary to customise the script to support your local cluster architecture. This can be done with fairly obvious modifications to the execute_nm() function.
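For example, a response matrix that is already stored as a subjects-by-features text file can be converted to pickle format with a couple of lines of pandas. This is only a sketch with illustrative file names, assuming the pickled inputs are pandas DataFrames (as in the HBR tutorial below):

import pandas as pd

# one subject per row, one response variable (e.g. voxel or ROI) per column
resp = pd.read_csv('resp.txt', sep=' ', header=None)
resp.to_pickle('resp_file.pkl')   # suitable for execute_nm(..., binary=True)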

        +
        +
\ No newline at end of file diff --git a/doc/build/html/pages/tutorial_CPC2020.html b/doc/build/html/pages/tutorial_CPC2020.html new file mode 100644 index 00000000..5ca9d71d --- /dev/null +++ b/doc/build/html/pages/tutorial_CPC2020.html @@ -0,0 +1,648 @@
Gaussian Process Regression — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Gaussian Process Regression

        +

        Created by Saige Rutherford, Thomas Wolfers, Mariam Zabihi

        +

        View on GitHub

        +

        Run in Google Colab

        +

        _______________________________________________________________________________

        +
        +

        Background Story

        +

Morten and Ingrid are concerned about the health of their father, Nordan. He recently turned 65. A few months ago he could not find his way home. Together, they visit a neurologist/psychiatrist to conduct a number of cognitive tests. However, those tests were inconclusive. While Nordan has a relatively low IQ, it could not explain his trouble returning home.

        +

Recently, the family heard about a new screening technique called normative modeling, with which one can place individuals in reference to a population norm on, for instance, measures such as brain volume. Nordan would like to undertake this procedure to better know what is going on and to potentially find targets for treatment. Therefore, the family booked an appointment with you, the normative modeling specialist. To find out what is going on, you compare Nordan's hippocampus to the norm and to a group of persons with dementia disorders who have a similar IQ and age as well as the same sex as Nordan.

        +

        Do your best to get as far as you can. However, you do not need to feel +bad if you cannot complete everything during the tutorial.

        +
        +
        +

        Task 0: Load data and install the pcntoolkit

        +
        #install normative modeling
        +!pip install pcntoolkit
        +
        +
        +

        Option 1: Connect your Google Drive account, and load data from +Google Drive. Having Google Drive connected will allow you to save any +files created back to your Drive folder. This step will require you to +download the csv files from +Github +to your computer, and then make a folder in your Google Drive account +and upload the csv files to this folder.

        +
        from google.colab import drive
        +drive.mount('/content/drive')
        +
        +#change dir to data on your google drive
        +import os
        +os.chdir('drive/My Drive/name-of-folder-where-you-uploaded-csv-files-from-Github/') #Change this path to match the path to your data in Google Drive
        +
        +# code by T. Wolfers
        +
        +
        +

        Option 2: Import the files directly from Github, and skip adding +them to Google Drive.

        +
        !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_demographics.csv
        +!wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_demographics_nordan.csv
        +!wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_features.csv
        +!wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_features_nordan.csv
        +
        +# code by S. Rutherford
        +
        +
        +
        +
        +

        TASK 1: Format input data

        +

You have four files: the features and demographics files for the norm sample, and two files of the same name for Nordan, your test sample. As one of your coworkers has done the preprocessing and quality control, there are more subjects in the demographics file than in the features file of the norm sample. Please select the overlap of participants between those two files.

        +

        Question for your understanding:

        +
          +
1. Why do we have to select the overlap between participants in terms of features and demographics?
        +
        import pandas as pd
        +
        +# read in the files.
        +norm_demographics = pd.read_csv('cpc_camcan_demographics.csv',
        +                                sep= ",",
        +                                index_col = 0)
        +norm_features = pd.read_csv('cpc_camcan_features.csv',
        +                            sep=",",
        +                            index_col = 0)
        +
        +# check columns through print [there are other better options]
        +print(norm_demographics)
        +print(norm_features)
        +
        +# find overlap in terms of participants between norm_sample_features and
        +# norm_sample_demographics
        +
        +norm_demographics_features = pd.concat([norm_demographics, norm_features],
        +                                       axis = 1,
        +                                       join = 'inner') # inner checks overlap
        +                                                       # outer combines
        +print(norm_demographics_features)
        +
        +# code by T. Wolfers
        +
        +
        +
        +
        +

        TASK 2: Prepare the covariate_normsample and testresponse_normsample file.

        +

As mentioned in the introductory presentation, those files need a specific format and the entries need to be separated by spaces. Use whatever method you know to prepare those files based on the data provided in TASK 1. Save those files in .txt format in your drive. Also get rid of the column names and participant IDs.

        +

Given that we only have limited time in this practical, we have to make a selection for the features based on your prior knowledge. With the information in mind that Nordan does not remember his way home, which subfield of the hippocampus is probably a good target for the investigations? Select a maximum of four hippocampal regions as features.

        +

NOTE: Normative modeling is a screening tool; we make this selection here only due to time constraints. In reality, we build these models on millions of putative biomarkers that are not restricted to brain imaging.

        +

Questions for your understanding:

        +
          +
2. What is the requirement for the features in terms of variable properties (e.g. dichotomous or continuous)?
3. What is the requirement for the covariates in terms of these properties?
4. What are the requirements for both together?
5. How does this depend on the algorithm used?
        +
# prepare covariate_normsample for sex and age
        +covariate_normsample = norm_demographics_features[['sex',
        +                                                   'age']]
        +
        +covariate_normsample.to_csv('covariate_normsample.txt',
        +                            sep = ' ',
        +                            header = False,
        +                            index = False)
        +
+# prepare features_normsample for relevant hippocampal subfields
        +features_normsample = norm_demographics_features[['left_CA1',
        +                                                 'left_CA3',
        +                                                 'right_CA1',
        +                                                 'right_CA3']]
        +
        +features_normsample.to_csv('features_normsample.txt',
        +                           sep = ' ',
        +                           header = False,
        +                           index = False)
        +
        +# code by T. Wolfers
        +
        +
        +
        +
        +

        TASK 3: Estimate normative model

        +

Once you have prepared and saved all the necessary files, look at the pcntoolkit for running normative modeling. Select an appropriate method, set up the toolkit, and run your analyses using 2-fold cross-validation in the norm sample. Change the output suffix from estimate to '_2fold'.

        +

        HINT: You primarily need the estimate function.

        +

SUGGESTION: While this process is running you can go to the next TASK 4; you will have no doubt when it is running correctly.

        +

Questions for your understanding:

        +
          +
6. What does cvfolds mean and why do we use it?
7. What is the output of the estimate function and what does it mean?
        +
        import pcntoolkit as pcn
        +
        +# run normative modeling using 2-fold cross-validation
        +
        +pcn.normative.estimate(covfile = 'covariate_normsample.txt',
        +                       respfile = 'features_normsample.txt',
        +                       cvfolds = 2,
        +                       alg = 'gpr',
        +                       outputsuffix = '_2fold')
        +
        +# code by T. Wolfers
        +
        +
        +
        +
        +

        TASK 4: Estimate the forward model of the normative model

        +

In order to visualize the normative trajectories you first need to run the forward model. To this end you need to set up an appropriate covariate_forwardmodel file that covers the age range appropriately for both sexes. Save this file as .txt. Then you can input the files you made in TASK 1 as well as the file you made now and run the forward model using the appropriate specifications.

        +

Questions for your understanding:

        +
          +
8. What are yhat and ys2?
9. Why does the output of the forward model not include the Z-scores?
        +
        # create covariate_forwardmodel.txt file
        +covariate_forwardmodel = {'sex': [0, 0, 0, 0, 0, 0, 0,
        +                                  1, 1, 1, 1, 1, 1, 1],
        +                          'age': [20, 30, 40, 50, 60, 70, 80,
        +                                  20, 30, 40, 50, 60, 70, 80]}
        +covariate_forwardmodel = pd.DataFrame(data=covariate_forwardmodel)
        +
        +covariate_forwardmodel.to_csv('covariate_forwardmodel.txt',
        +                           sep = ' ',
        +                           header = False,
        +                           index = False)
        +
        +# estimate forward model
        +pcn.normative.estimate(covfile = 'covariate_normsample.txt',
        +                       respfile = 'features_normsample.txt',
        +                       testcov = 'covariate_forwardmodel.txt',
        +                       cvfolds = None,
        +                       alg = 'gpr',
        +                       outputsuffix = '_forward')
        +
        +# code by T. Wolfers
        +
        +
        +
        +
        +

        TASK 5: Visualize forward model

        +

        Visualize the forward model of the normative model similar to the figure +below.

        +
        +../_images/nm_plot.jpeg +
        +

HINT: First create a function that calculates the confidence intervals and then plot yhat and ys2 of the forward model. Finally, plot the data of individual participants.

        +
        import numpy as np
        +import matplotlib.pyplot as plt
        +
        +# confidence interval calculation at x_forward
        +def confidence_interval(s2,x,z):
        +  CI=np.zeros((len(x_forward),4))
        +  for i,xdot in enumerate(x_forward):
        +    ci_inx=np.isin(x,xdot)
        +    S2=s2[ci_inx]
        +    S_hat=np.mean(S2,axis=0)
        +    n=S2.shape[0]
        +    CI[i,:]=z*np.power(S_hat/n,.5)
        +  return CI
        +
        +
        +feature_names=['left_CA1','left_CA3','right_CA1','right_CA3']
        +sex_covariates=[ 'Female','Male']
        +# Creating plots for Female and male
        +for i,sex in enumerate(sex_covariates):
        +#forward model data
        +    forward_yhat = pd.read_csv('yhat_forward.txt', sep = ' ', header=None)
        +    yhat_forward=forward_yhat.values
        +    yhat_forward=yhat_forward[7*i:7*(i+1)]
        +    x_forward=[20, 30, 40, 50, 60, 70, 80]
        +
        +# Find the index of the data exclusively for one sex. Female:0, Male: 1
        +    inx=np.where(covariate_normsample.sex==i)[0]
        +    x=covariate_normsample.values[inx,1]
        +# actual data
        +    y = pd.read_csv('features_normsample.txt', sep = ' ', header=None)
        +    y=y.values[inx]
+# confidence interval: yhat +/- z*(std/n^.5) --> 95% CI: z=1.96, 99% CI: z=2.58
        +    s2= pd.read_csv('ys2_2fold.txt', sep = ' ', header=None)
        +    s2=s2.values[inx]
        +
        +    CI_95=confidence_interval(s2,x,1.96)
        +    CI_99=confidence_interval(s2,x,2.58)
        +
+# Create a trajectory plot for each feature
        +    for j,name in enumerate(feature_names):
        +         fig=plt.figure()
        +         ax=fig.add_subplot(111)
+         ax.plot(x_forward,yhat_forward[:,j], linewidth=4, label='Normative trajectory')
        +
        +
        +         ax.plot(x_forward,CI_95[:,j]+yhat_forward[:,j], linewidth=2,linestyle='--',c='g', label='95% confidence interval')
        +         ax.plot(x_forward,-CI_95[:,j]+yhat_forward[:,j], linewidth=2,linestyle='--',c='g')
        +
        +         ax.plot(x_forward,CI_99[:,j]+yhat_forward[:,j], linewidth=1,linestyle='--',c='k', label='99% confidence interval')
        +         ax.plot(x_forward,-CI_99[:,j]+yhat_forward[:,j], linewidth=1,linestyle='--',c='k')
        +
        +         ax.scatter(x,y[:,j],c='r', label=name)
        +         plt.legend(loc='upper left')
+         plt.title('Normative trajectory of '+name+' in '+sex+' cohort')
        +         plt.show()
        +         plt.close()
        +
        +# code by M. Zabihi
        +
        +
        +
        +
        +

        TASK 6: Apply the normative model to Nordan’s data and the dementia patients.

        +
        # read in Nordan's as well as the patient's demographics and features
        +demographics_nordan = pd.read_csv('cpc_camcan_demographics_nordan.csv',
        +                                       sep= ",",
        +                                       index_col = 0)
        +features_nordan = pd.read_csv('cpc_camcan_features_nordan.csv',
        +                            sep=",",
        +                            index_col = 0)
        +
+# create a covariate file for Nordan's as well as the patients' demographics
        +covariate_nordan = demographics_nordan[['sex',
        +                                        'age']]
        +covariate_nordan.to_csv('covariate_nordan.txt',
        +                        sep = ' ',
        +                        header = False,
        +                        index = False)
        +
        +# create the corresponding feature file
        +features_nordan = features_nordan[['left_CA1',
        +                                  'left_CA3',
        +                                  'right_CA1',
        +                                  'right_CA3']]
        +
        +features_nordan.to_csv('features_nordan.txt',
        +                        sep = ' ',
        +                        header = False,
        +                        index = False)
        +
        +# apply normative modeling
        +pcn.normative.estimate(covfile = 'covariate_normsample.txt',
        +                       respfile = 'features_normsample.txt',
        +                       testcov = 'covariate_nordan.txt',
        +                       testresp = 'features_nordan.txt',
        +                       cvfolds = None,
        +                       alg = 'gpr',
        +                       outputsuffix = '_nordan')
        +
        +# code by T. Wolfers
        +
        +
        +
        +
        +

TASK 7: In which hippocampal subfield(s) does Nordan deviate extremely?

        +

No coding is necessary; just create a presentation which includes recommendations to Nordan and his family. Use i) |Z| > 3.6 and ii) |Z| > 1.96 as definitions for extreme normative deviations.
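Although the task itself requires no coding, a small sketch like the following can help you inspect the deviations. It assumes the estimate call above wrote the Z-scores to 'Z_nordan.txt', following the same naming pattern as 'yhat_forward.txt' and 'ys2_2fold.txt'; adjust the file name if your output differs.

import pandas as pd

feature_names = ['left_CA1', 'left_CA3', 'right_CA1', 'right_CA3']

# assumed output file name: <output>_<outputsuffix>.txt
Z_nordan = pd.read_csv('Z_nordan.txt', sep=' ', header=None)
Z_nordan.columns = feature_names

print('deviations with |Z| > 1.96:')
print(Z_nordan.abs() > 1.96)

print('extreme deviations with |Z| > 3.6:')
print(Z_nordan.abs() > 3.6)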

        +
        +
        +

        TASK 8 (OPTIONAL): Implement a function that calculates percentage change.

        +

\text{Percentage change} = \frac{x_1 - x_2}{|x_2|} \times 100

        +
        # function that calculates percentage change
        +def calculate_percentage_change(x1, x2):
        +  percentage_change = ((x1 - x2) / abs(x2)) * 100
        +  return percentage_change
        +
        +# code by T. Wolfers
        +
        +
        +
        +
        +

        TASK 9 (OPTIONAL): Visualize percent change

        +

Plot the percentage change in yhat of the forward model in reference to age 20. Do that for both sexes separately.

        +
        import matplotlib.pyplot as plt
        +
        +forward_yhat = pd.read_csv('yhat_forward.txt', sep = ' ', header=None)
        +
+# You can indicate here which hippocampal subfield you would like to visualize
        +hyppocampal_subfield = 0
        +
        +percentage_change_female = []
        +percentage_change_male = []
        +count = 0
        +lengths = len(forward_yhat[hyppocampal_subfield])
        +for entry in forward_yhat[hyppocampal_subfield]:
        +  if count > 0 and count < 7:
        +    loop_percentage_change_female = calculate_percentage_change(entry,
        +                                                                forward_yhat.iloc[0,
        +                                                                                  hyppocampal_subfield])
        +    percentage_change_female.append(loop_percentage_change_female)
+  elif count > 7:
+    # reference value: the age-20 male row of the forward model (row index 7)
+    loop_percentage_change_male = calculate_percentage_change(entry,
+                                                              forward_yhat.iloc[7,
+                                                                                hyppocampal_subfield])
        +    percentage_change_male.append(loop_percentage_change_male)
        +  count = count + 1
        +
        +names = ['30 compared to 20 years',
        +         '40 compared to 20 years',
        +         '50 compared to 20 years',
        +         '60 compared to 20 years',
        +         '70 compared to 20 years',
        +         '80 compared to 20 years']
        +
        +# females
        +plt.subplot(121)
        +plt.bar(names, percentage_change_female)
        +plt.xticks(rotation=90)
        +plt.ylim(-20, 2)
        +
        +# males
        +plt.subplot(122)
        +plt.bar(names, percentage_change_male)
        +plt.xticks(rotation=90)
        +plt.ylim(-20, 2)
        +
        +# code by T. Wolfers
        +
        +
        +
        +
\ No newline at end of file diff --git a/doc/build/html/pages/tutorial_HBR.html b/doc/build/html/pages/tutorial_HBR.html new file mode 100644 index 00000000..3becb94f --- /dev/null +++ b/doc/build/html/pages/tutorial_HBR.html @@ -0,0 +1,572 @@
Hierarchical Bayesian Regression — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Hierarchical Bayesian Regression

        +

        Hierarchical Bayesian Regression Normative Modelling and Transfer onto unseen site.

        +

        This notebook will go through basic data preparation (training and +testing set, see Saige’s +tutorial +on Normative Modelling for more detail), the actual training of the +models, and will finally describe how to transfer the trained models +onto unseen sites. The approach is described in detail in these papers:

        + +

        View on GitHub

        +

While we run everything on a single compute node here, for larger datasets it is probably desirable to parallelize this using the normative_parallel functionality.

        +

        Run in Google Colab

        +

        Created by Saige Rutherford, adapted/edited by Andre Marquand and Pierre Berthet

        +
        +

        Step 0: Install necessary libraries & grab data files

        +
        ! pip install numpy scipy arviz pymc3 matplotlib pandas
        +! pip uninstall -y Theano-PyMC  # conflicts with Theano on some environments
        +! pip install pcntoolkit==0.19
        +
        +
        +

For this tutorial we will use data from the Functional Connectome Project FCON1000 to create a multi-site dataset.

        +

The dataset contains some cortical measures (e.g. thickness), processed by Freesurfer 6.0, and some covariates (e.g. age, site, gender).

        +

First we import the required packages and create a working directory.

        +
        import os
        +import pandas as pd
        +import pcntoolkit as ptk
        +import numpy as np
        +import pickle
        +from matplotlib import pyplot as plt
        +
        +
        +
        processing_dir = "HBR_demo/"    # replace with a path to your working directory
        +if not os.path.isdir(processing_dir):
        +    os.makedirs(processing_dir)
        +os.chdir(processing_dir)
        +processing_dir = os.getcwd()
        +
        +
        +
        +

        Overview

        +

Here we get the FCON dataset, remove the ICBM site for later transfer, assign a numeric site id to each of the scanner sites, and plot an overview of the left hemisphere mean raw cortical thickness as a function of age, color coded by the various sites:

        +
        fcon = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000.csv')
        +
        +icbm = fcon.loc[fcon['site'] == 'ICBM']
        +icbm['sitenum'] = 0
        +fcon = fcon.loc[fcon['site'] != 'ICBM']
        +
        +sites = fcon['site'].unique()
        +fcon['sitenum'] = 0
        +
        +f, ax = plt.subplots(figsize=(12, 12))
        +
        +for i,s in enumerate(sites):
        +    idx = fcon['site'] == s
        +    fcon['sitenum'].loc[idx] = i
        +
        +    print('site',s, sum(idx))
        +    ax.scatter(fcon['age'].loc[idx], fcon['lh_MeanThickness_thickness'].loc[idx])
        +
        +ax.legend(sites)
        +ax.set_ylabel('LH mean cortical thickness [mm]')
        +ax.set_xlabel('age')
        +
        +
        +
        +
        +
        +

        Step 1: Prepare training and testing sets

        +

Then we randomly split half of the samples (participants) to be either in the training or in the testing samples. We do this for the remaining FCON dataset and for the ICBM data. The transfer function will also require a training and a test sample.

        +

The numbers of samples per site used for training and for testing are then displayed.

        +
        tr = np.random.uniform(size=fcon.shape[0]) > 0.5
        +te = ~tr
        +
        +fcon_tr = fcon.loc[tr]
        +fcon_te = fcon.loc[te]
        +
        +tr = np.random.uniform(size=icbm.shape[0]) > 0.5
        +te = ~tr
        +
        +icbm_tr = icbm.loc[tr]
        +icbm_te = icbm.loc[te]
        +
        +print('sample size check')
        +for i,s in enumerate(sites):
        +    idx = fcon_tr['site'] == s
        +    idxte = fcon_te['site'] == s
        +    print(i,s, sum(idx), sum(idxte))
        +
        +# Uncomment the following lines if you want to keep a defined version of the sets
        +# fcon_tr.to_csv('/Users/andmar/data/sairut/data/fcon1000_tr.csv')
        +# fcon_te.to_csv('/Users/andmar/data/sairut/data/fcon1000_te.csv')
        +# icbm_tr.to_csv('/Users/andmar/data/sairut/data/fcon1000_icbm_tr.csv')
        +# icbm_te.to_csv('/Users/andmar/data/sairut/data/fcon1000_icbm_te.csv')
        +
        +
        +

Otherwise, you can just load these pre-defined subsets:

        +
        # Optional
        +fcon_tr = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_tr.csv')
        +fcon_te = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_te.csv')
        +icbm_tr = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_icbm_tr.csv')
        +icbm_te = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_icbm_te.csv')
        +
        +
        +
        +
        +

        Step 2: Configure HBR inputs: covariates, measures and batch effects

        +

We will here only use the mean cortical thickness for the right and left hemispheres: two IDPs (imaging-derived phenotypes).

        +
        idps = ['rh_MeanThickness_thickness','lh_MeanThickness_thickness']
        +
        +
        +

As input to the model, we need covariates (used to describe predictable sources of variability (fixed effects), here ‘age’), measures (here cortical thickness on two IDPs), and batch effects (random sources of variability, here ‘scanner site’ and ‘sex’).

        +

        X corresponds to the covariate(s)

        +

        Y to the measure(s)

        +

        batch_effects to the random effects

        +

        We need these values both for the training (_train) and for the +testing set (_test).

        +
        X_train = (fcon_tr['age']/100).to_numpy(dtype=float)
        +Y_train = fcon_tr[idps].to_numpy(dtype=float)
        +batch_effects_train = fcon_tr[['sitenum','sex']].to_numpy(dtype=int)
        +
        +with open('X_train.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(X_train), file)
        +with open('Y_train.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(Y_train), file)
        +with open('trbefile.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(batch_effects_train), file)
        +
        +
        +X_test = (fcon_te['age']/100).to_numpy(dtype=float)
        +Y_test = fcon_te[idps].to_numpy(dtype=float)
        +batch_effects_test = fcon_te[['sitenum','sex']].to_numpy(dtype=int)
        +
        +with open('X_test.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(X_test), file)
        +with open('Y_test.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(Y_test), file)
        +with open('tsbefile.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(batch_effects_test), file)
        +
        +# a simple function to quickly load pickle files
        +def ldpkl(filename: str):
        +    with open(filename, 'rb') as f:
        +        return pickle.load(f)
        +
        +
        +
        +
        +

        Step 3: Files and Folders grooming

        +
        respfile = os.path.join(processing_dir, 'Y_train.pkl')       # measurements  (eg cortical thickness) of the training samples (columns: the various features/ROIs, rows: observations or subjects)
+covfile = os.path.join(processing_dir, 'X_train.pkl')        # covariates (eg age) of the training samples (columns: covariates, rows: observations or subjects)
        +
        +testrespfile_path = os.path.join(processing_dir, 'Y_test.pkl')       # measurements  for the testing samples
        +testcovfile_path = os.path.join(processing_dir, 'X_test.pkl')        # covariate file for the testing samples
        +
        +trbefile = os.path.join(processing_dir, 'trbefile.pkl')      # training batch effects file (eg scanner_id, gender)  (columns: the various batch effects, rows: observations or subjects)
        +tsbefile = os.path.join(processing_dir, 'tsbefile.pkl')      # testing batch effects file
        +
        +output_path = os.path.join(processing_dir, 'Models/')    #  output path, where the models will be written
        +log_dir = os.path.join(processing_dir, 'log/')           #
        +if not os.path.isdir(output_path):
        +    os.mkdir(output_path)
        +if not os.path.isdir(log_dir):
        +    os.mkdir(log_dir)
        +
        +outputsuffix = '_estimate'      # a string to name the output files, of use only to you, so adapt it for your needs.
        +
        +
        +
        +
        +

        Step 4: Estimating the models

        +

Now we have everything ready to estimate the normative models. The estimate function only needs the training and testing sets, each divided into three datasets: covariates, measures and batch effects. We obviously specify alg='hbr' to use the Hierarchical Bayesian Regression method, which is well suited for multi-site datasets. The remaining arguments are basic data management: where the models, logs, and output files will be written and how they will be named.

        +
        ptk.normative.estimate(covfile=covfile,
        +                       respfile=respfile,
        +                       tsbefile=tsbefile,
        +                       trbefile=trbefile,
        +                       alg='hbr',
        +                       log_path=log_dir,
        +                       binary=True,
        +                       output_path=output_path, testcov= testcovfile_path,
        +                       testresp = testrespfile_path,
        +                       outputsuffix=outputsuffix, savemodel=True)
        +
        +
        +

At this point some analyses can be done, and there are also some error metrics that may be of interest. This is covered in Step 6 and in Saige’s tutorial on Normative Modelling.

        +
        +
        +

Step 5: Transferring the models to unseen sites

        +

Similarly to what was done before for the FCON data, we also need to prepare the ICBM-specific data in order to run the transfer function: training and testing sets of covariates, measures and batch effects:

        +
        X_adapt = (icbm_tr['age']/100).to_numpy(dtype=float)
        +Y_adapt = icbm_tr[idps].to_numpy(dtype=float)
        +batch_effects_adapt = icbm_tr[['sitenum','sex']].to_numpy(dtype=int)
        +
        +with open('X_adaptation.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(X_adapt), file)
        +with open('Y_adaptation.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(Y_adapt), file)
        +with open('adbefile.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(batch_effects_adapt), file)
        +
        +# Test data (new dataset)
        +X_test_txfr = (icbm_te['age']/100).to_numpy(dtype=float)
        +Y_test_txfr = icbm_te[idps].to_numpy(dtype=float)
        +batch_effects_test_txfr = icbm_te[['sitenum','sex']].to_numpy(dtype=int)
        +
        +with open('X_test_txfr.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(X_test_txfr), file)
        +with open('Y_test_txfr.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(Y_test_txfr), file)
        +with open('txbefile.pkl', 'wb') as file:
        +    pickle.dump(pd.DataFrame(batch_effects_test_txfr), file)
        +
        +
        +
        respfile = os.path.join(processing_dir, 'Y_adaptation.pkl')
        +covfile = os.path.join(processing_dir, 'X_adaptation.pkl')
        +testrespfile_path = os.path.join(processing_dir, 'Y_test_txfr.pkl')
        +testcovfile_path = os.path.join(processing_dir, 'X_test_txfr.pkl')
        +trbefile = os.path.join(processing_dir, 'adbefile.pkl')
        +tsbefile = os.path.join(processing_dir, 'txbefile.pkl')
        +
        +log_dir = os.path.join(processing_dir, 'log_transfer/')
        +output_path = os.path.join(processing_dir, 'Transfer/')
        +model_path = os.path.join(processing_dir, 'Models/')  # path to the previously trained models
        +outputsuffix = '_transfer'  # suffix added to the output files from the transfer function
        +
        +
        +

        Here, the difference is that the transfer function needs a model path, +which points to the models we just trained, and new site data (training +and testing). That is basically the only difference.

        +
        yhat, s2, z_scores = ptk.normative.transfer(covfile=covfile,
        +                                            respfile=respfile,
        +                                            tsbefile=tsbefile,
        +                                            trbefile=trbefile,
        +                                            model_path = model_path,
        +                                            alg='hbr',
        +                                            log_path=log_dir,
        +                                            binary=True,
        +                                            output_path=output_path,
        +                                            testcov= testcovfile_path,
        +                                            testresp = testrespfile_path,
        +                                            outputsuffix=outputsuffix,
        +                                            savemodel=True)
        +
        +
        +

And that is it: you now have models that benefited from prior knowledge about different scanner sites to learn on unseen sites.

        +
        +
        +

        Step 6: Interpreting model performance

        +

        Output evaluation metrics definitions

key value   Description
yhat        predictive mean
ys2         predictive variance
nm          normative model
Z           deviance scores
Rho         Pearson correlation between true and predicted responses
pRho        parametric p-value for this correlation
RMSE        root mean squared error between true/predicted responses
SMSE        standardised mean squared error
EV          explained variance
MSLL        mean standardized log loss (see page 23)
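As a point of reference, the deviance (Z) scores express how far each true response lies from the predictive mean, in units of the predictive standard deviation. A minimal numpy sketch of that relationship, assuming ys2 already contains the full predictive variance:

import numpy as np

def z_scores(y, yhat, ys2):
    # deviation of each observation from the normative prediction,
    # scaled by the predictive standard deviation
    return (y - yhat) / np.sqrt(ys2)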

        +
        +
\ No newline at end of file diff --git a/doc/build/html/pages/tutorial_ROIcorticalthickness.html b/doc/build/html/pages/tutorial_ROIcorticalthickness.html new file mode 100644 index 00000000..5c8f631a --- /dev/null +++ b/doc/build/html/pages/tutorial_ROIcorticalthickness.html @@ -0,0 +1,888 @@
Bayesian Linear Regression — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Bayesian Linear Regression

        +

        Normative Modeling Tutorial Using Multi-Site Cortical Thickness Data and Bayesian Linear Regression.

        +

        This notebook will prepare the data for normative modelling (assembling +data matrices from different datasets, preparing the covariates etc).

        +

        View on GitHub

        +

        Run in Google Colab

        +

        Created by Saige Rutherford

        +
        +

        Step 0: Install necessary libraries & grab data files

        +
        ! git clone https://github.com/predictive-clinical-neuroscience/PCNtoolkit-demo.git
        +
        +
        +
        import os
        +
        +
        +
        # set this path to the git cloned PCNtoolkit-demo repository --> Uncomment whichever line you need for either running on your own computer or on Google Colab.
        +#os.chdir('/Users/saigerutherford/repos/PCNtoolkit-demo/') # if running on your own computer, use this line (but obvi change the path)
        +#os.chdir('PCNtoolkit-demo/') # if running on Google Colab, use this line
        +
        +
        +
        ! pip install -r requirements.txt
        +
        +
        +
        +
        +

        Step 1: Prepare covariate data

        +

        For this tutorial we will use data from the Human Connectome Project +Young Adult +study, +CAMCAN, and +IXI to create a +multi-site dataset.

        +

        Our first step is to prepare and combine the covariate (age & sex) data +from each site.

        +
        import pandas as pd
        +import numpy as np
        +import matplotlib.pyplot as plt
        +import seaborn as sns
        +import joypy
        +from sklearn.model_selection import train_test_split
        +from pcntoolkit.normative import estimate, evaluate
        +from pcntoolkit.utils import create_bspline_basis, compute_MSLL
        +
        +
        +
        hcp = pd.read_csv('data/HCP1200_age_gender.csv')
        +cam = pd.read_csv('data/cam_age_gender.csv')
        +ixi = pd.read_csv('data/IXI_age_gender.csv')
        +
        +
        +
        cam_hcp = pd.merge(hcp, cam, how='outer')
        +
        +
        +
        cov = pd.merge(cam_hcp, ixi, how='outer')
        +
        +
        +
        sns.set(font_scale=1.5, style='darkgrid')
        +
        +
        +
        sns.displot(cov, x="age", hue="site", multiple="stack", height=6)
        +
        +
        +
        cov.groupby(['site']).describe()
        +
        +
        +
        +
        +

        Step 2: Prepare brain data

        +

        Next we will format and combine the MRI data. We are using cortical +thickness maps that are created by running recon-all from Freesurfer 6. +We need to merge together the left and right hemisphere text files for +each site, and then combine the different sites into a single dataframe. +We reduce the dimensionality of our data by using ROIs from the +Desikan-Killiany atlas.

        +

Here is some pseudo-code (run from a terminal in the folder that has all subjects' recon-all output folders) that was used to extract these ROIs:

        +
        export SUBJECTS_DIR=/path/to/study/freesurfer_data/
        +aparcstats2table --subject sub-* --hemi lh --meas thickness --tablefile HCP1200_aparc_lh_thickness.txt
        +aparcstats2table --subject sub-* --hemi rh --meas thickness --tablefile HCP1200_aparc_rh_thickness.txt
        +
        +
        +
        cam = pd.read_csv('data/CAMCAN_aparc_thickness.csv')
        +hcpya = pd.read_csv('data/HCP1200_aparc_thickness.csv')
        +ixi = pd.read_csv('data/IXI_aparc_thickness.csv')
        +
        +
        +
        hcpya_cam = pd.merge(hcpya, cam, how='outer')
        +
        +
        +
        brain_all = pd.merge(ixi, hcpya_cam, how='outer')
        +
        +
        +

We also want to include the Euler number as a covariate. So we extracted the Euler number from each subject’s recon-all output folder into a text file, and we now need to format and combine these into our brain dataframe.

        +
        hcp_euler = pd.read_csv('data/hcp-ya_euler.csv')
        +cam_euler = pd.read_csv('data/cam_euler.csv')
        +ixi_euler = pd.read_csv('data/ixi_euler.csv')
        +
        +
        +
        hcp_euler['site'] = 'hcp'
        +cam_euler['site'] = 'cam'
        +ixi_euler['site'] = 'ixi'
        +
        +
        +
        hcp_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True)
        +cam_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True)
        +ixi_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True)
        +
        +
        +
        hcp_euler.dropna(inplace=True)
        +cam_euler.dropna(inplace=True)
        +ixi_euler.dropna(inplace=True)
        +
        +
        +
        hcp_euler['rh_euler'] = hcp_euler['rh_euler'].astype(int)
        +hcp_euler['lh_euler'] = hcp_euler['lh_euler'].astype(int)
        +cam_euler['rh_euler'] = cam_euler['rh_euler'].astype(int)
        +cam_euler['lh_euler'] = cam_euler['lh_euler'].astype(int)
        +ixi_euler['rh_euler'] = ixi_euler['rh_euler'].astype(int)
        +ixi_euler['lh_euler'] = ixi_euler['lh_euler'].astype(int)
        +
        +
        +
        hcp_cam_euler = pd.merge(hcp_euler, cam_euler, how='outer')
        +
        +
        +
        df_euler = pd.merge(ixi_euler, hcp_cam_euler, how='outer')
        +
        +
        +

Finally, we need to center the Euler number for each site. The Euler number is very site-specific, so in order to use the same exclusion threshold across sites we need to center it by subtracting the site median from all subjects at a site. Then we will multiply by negative one, take the square root, and exclude any subjects with a value above 10. This choice of threshold is fairly arbitrary. If possible, all of your data should be visually inspected to verify that the data inclusion is not too strict or too lenient.

        +
        df_euler['avg_euler'] = df_euler[['lh_euler','rh_euler']].mean(axis=1)
        +
        +
        +
        df_euler.groupby(by='site').median()
        +
        +
        +
        df_euler['site_median'] = df_euler['site']
        +
        +
        +
        df_euler['site_median'] = df_euler['site_median'].replace({'hcp':-43,'cam':-61,'ixi':-56})
        +
        +
        +
        df_euler['avg_euler_centered'] = df_euler['avg_euler'] - df_euler['site_median']
        +
        +
        +
        df_euler['avg_euler_centered_neg'] = df_euler['avg_euler_centered']*-1
        +
        +
        +
        df_euler['avg_euler_centered_neg_sqrt'] = np.sqrt(np.absolute(df_euler['avg_euler_centered_neg']))
        +
        +
        +
        pd.set_option('display.max_rows', 500)
        +pd.set_option('display.max_columns', 500)
        +pd.set_option('display.width', 1000)
        +#create a color gradent function to be used in the colormap parameter
        +def color_gradient(x=0.0, start=(0, 0, 0), stop=(1, 1, 1)):
        +    r = np.interp(x, [0, 1], [start[0], stop[0]])
        +    g = np.interp(x, [0, 1], [start[1], stop[1]])
        +    b = np.interp(x, [0, 1], [start[2], stop[2]])
        +    return r, g, b#show the table
        +#plot the figure
        +plt.figure(dpi=380)
        +fig, axes = joypy.joyplot(df_euler, column=['avg_euler_centered_neg_sqrt'], overlap=2.5, by="site", ylim='own', fill=True, figsize=(6,6)
        +                          , legend=False, xlabels=True, ylabels=True, colormap=lambda x: color_gradient(x, start=(.08, .45, .8),stop=(.8, .34, .44))
        +                          , alpha=0.6, linewidth=.5, linecolor='w', fade=True)
        +plt.title('sqrt(-Euler Number), median centered', fontsize=18, color='black', alpha=1)
        +plt.xlabel('sqrt(-Euler number)', fontsize=14, color='black', alpha=1)
        +plt.ylabel('Site', fontsize=14, color='black', alpha=1)
+plt.show()
        +
        +
        +
        brain = pd.merge(df_euler, brain_all, how='inner')
        +
        +
        +
        len(brain)
        +
        +
        +
        brain_good = brain.query('avg_euler_centered_neg_sqrt < 10')
        +
        +
        +
        len(brain_good)
        +
        +
        +

        We lose 63 subjects because they have a large euler number.

        +
        +
        +

        Step 3: Combine covariate & cortical thickness dataframes

        +

        Even though the normative modeling code needs the covariate and features +(cortical thickness) in separate text files, we first need to merge them +together to make sure that we have the same subjects in each file and +that the rows (representing subjects) align.

        +
        # make sure to use how="inner" so that we only include subjects that have data in both the covariate and the cortical thickness files
        +all_data = pd.merge(brain_good, cov, how='inner')
        +
        +
        +
        +
        +

        Step 4: Format dataframes to run normative models

        +
        from sklearn.model_selection import train_test_split
        +
        +
        +
        # Remove any subjects that have NaN variables in any of the columns
        +all_data.dropna(subset=['lh_bankssts_thickness',
        +       'lh_caudalanteriorcingulate_thickness',
        +       'lh_caudalmiddlefrontal_thickness', 'lh_cuneus_thickness',
        +       'lh_entorhinal_thickness', 'lh_fusiform_thickness',
        +       'lh_inferiorparietal_thickness', 'lh_inferiortemporal_thickness',
        +       'lh_isthmuscingulate_thickness', 'lh_lateraloccipital_thickness',
        +       'lh_lateralorbitofrontal_thickness', 'lh_lingual_thickness',
        +       'lh_medialorbitofrontal_thickness', 'lh_middletemporal_thickness',
        +       'lh_parahippocampal_thickness', 'lh_paracentral_thickness',
        +       'lh_parsopercularis_thickness', 'lh_parsorbitalis_thickness',
        +       'lh_parstriangularis_thickness', 'lh_pericalcarine_thickness',
        +       'lh_postcentral_thickness', 'lh_posteriorcingulate_thickness',
        +       'lh_precentral_thickness', 'lh_precuneus_thickness',
        +       'lh_rostralanteriorcingulate_thickness',
        +       'lh_rostralmiddlefrontal_thickness', 'lh_superiorfrontal_thickness',
        +       'lh_superiorparietal_thickness', 'lh_superiortemporal_thickness',
        +       'lh_supramarginal_thickness', 'lh_frontalpole_thickness',
        +       'lh_temporalpole_thickness', 'lh_transversetemporal_thickness',
        +       'lh_insula_thickness', 'lh_MeanThickness_thickness',
        +       'rh_bankssts_thickness', 'rh_caudalanteriorcingulate_thickness',
        +       'rh_caudalmiddlefrontal_thickness', 'rh_cuneus_thickness',
        +       'rh_entorhinal_thickness', 'rh_fusiform_thickness',
        +       'rh_inferiorparietal_thickness', 'rh_inferiortemporal_thickness',
        +       'rh_isthmuscingulate_thickness', 'rh_lateraloccipital_thickness',
        +       'rh_lateralorbitofrontal_thickness', 'rh_lingual_thickness',
        +       'rh_medialorbitofrontal_thickness', 'rh_middletemporal_thickness',
        +       'rh_parahippocampal_thickness', 'rh_paracentral_thickness',
        +       'rh_parsopercularis_thickness', 'rh_parsorbitalis_thickness',
        +       'rh_parstriangularis_thickness', 'rh_pericalcarine_thickness',
        +       'rh_postcentral_thickness', 'rh_posteriorcingulate_thickness',
        +       'rh_precentral_thickness', 'rh_precuneus_thickness',
        +       'rh_rostralanteriorcingulate_thickness',
        +       'rh_rostralmiddlefrontal_thickness', 'rh_superiorfrontal_thickness',
        +       'rh_superiorparietal_thickness', 'rh_superiortemporal_thickness',
        +       'rh_supramarginal_thickness', 'rh_frontalpole_thickness',
        +       'rh_temporalpole_thickness', 'rh_transversetemporal_thickness',
        +       'rh_insula_thickness', 'rh_MeanThickness_thickness','age','sex'], inplace=True)
        +
        +
        +

        Separate the covariate & features into their own dataframes

        +
        all_data_features = all_data[['lh_bankssts_thickness',
        +       'lh_caudalanteriorcingulate_thickness',
        +       'lh_caudalmiddlefrontal_thickness', 'lh_cuneus_thickness',
        +       'lh_entorhinal_thickness', 'lh_fusiform_thickness',
        +       'lh_inferiorparietal_thickness', 'lh_inferiortemporal_thickness',
        +       'lh_isthmuscingulate_thickness', 'lh_lateraloccipital_thickness',
        +       'lh_lateralorbitofrontal_thickness', 'lh_lingual_thickness',
        +       'lh_medialorbitofrontal_thickness', 'lh_middletemporal_thickness',
        +       'lh_parahippocampal_thickness', 'lh_paracentral_thickness',
        +       'lh_parsopercularis_thickness', 'lh_parsorbitalis_thickness',
        +       'lh_parstriangularis_thickness', 'lh_pericalcarine_thickness',
        +       'lh_postcentral_thickness', 'lh_posteriorcingulate_thickness',
        +       'lh_precentral_thickness', 'lh_precuneus_thickness',
        +       'lh_rostralanteriorcingulate_thickness',
        +       'lh_rostralmiddlefrontal_thickness', 'lh_superiorfrontal_thickness',
        +       'lh_superiorparietal_thickness', 'lh_superiortemporal_thickness',
        +       'lh_supramarginal_thickness', 'lh_frontalpole_thickness',
        +       'lh_temporalpole_thickness', 'lh_transversetemporal_thickness',
        +       'lh_insula_thickness', 'lh_MeanThickness_thickness',
        +       'rh_bankssts_thickness', 'rh_caudalanteriorcingulate_thickness',
        +       'rh_caudalmiddlefrontal_thickness', 'rh_cuneus_thickness',
        +       'rh_entorhinal_thickness', 'rh_fusiform_thickness',
        +       'rh_inferiorparietal_thickness', 'rh_inferiortemporal_thickness',
        +       'rh_isthmuscingulate_thickness', 'rh_lateraloccipital_thickness',
        +       'rh_lateralorbitofrontal_thickness', 'rh_lingual_thickness',
        +       'rh_medialorbitofrontal_thickness', 'rh_middletemporal_thickness',
        +       'rh_parahippocampal_thickness', 'rh_paracentral_thickness',
        +       'rh_parsopercularis_thickness', 'rh_parsorbitalis_thickness',
        +       'rh_parstriangularis_thickness', 'rh_pericalcarine_thickness',
        +       'rh_postcentral_thickness', 'rh_posteriorcingulate_thickness',
        +       'rh_precentral_thickness', 'rh_precuneus_thickness',
        +       'rh_rostralanteriorcingulate_thickness',
        +       'rh_rostralmiddlefrontal_thickness', 'rh_superiorfrontal_thickness',
        +       'rh_superiorparietal_thickness', 'rh_superiortemporal_thickness',
        +       'rh_supramarginal_thickness', 'rh_frontalpole_thickness',
        +       'rh_temporalpole_thickness', 'rh_transversetemporal_thickness',
        +       'rh_insula_thickness', 'rh_MeanThickness_thickness']]
        +
        +
        +
        all_data_covariates = all_data[['age','sex','site']]
        +
        +
        +

Right now, the sites are coded as strings in a single column. We instead need to dummy-encode the site variable so that there is a column for each site, containing binary (0/1) indicators. Luckily, pandas has a nice built-in function, pd.get_dummies, to help us format the site column this way!

        +
        all_data_covariates = pd.get_dummies(all_data_covariates, columns=['site'])
        +
        +
        +
        all_data['Average_Thickness'] = all_data[['lh_MeanThickness_thickness','rh_MeanThickness_thickness']].mean(axis=1)
        +
        +
        +

Take a sneak peek to see if there are any obvious site effects. If there were, we would see a large separation between the fitted regression lines for each site.

        +
        sns.set_theme(style="darkgrid",font_scale=1.5)
        +c = sns.lmplot(data=all_data, x="age", y="Average_Thickness", hue="site", height=6)
        +plt.ylim(1.5, 3.25)
        +plt.xlim(15, 95)
        +plt.show()
        +
        +
        +
        +

        Create train/test split

        +

We will use 80% of the data for training and 20% for testing. We stratify the train/test split using the site variable to make sure that the train and test sets both contain data from all sites. The model wouldn't be able to learn the site effects if all of the data from one site were only in the test set.

        +
        X_train, X_test, y_train, y_test = train_test_split(all_data_covariates, all_data_features, stratify=all_data['site'], test_size=0.2, random_state=42)
        +
        +
        +

Verify the sizes of your train & test covariate and response arrays

        +
        tr_cov_size = X_train.shape
        +tr_resp_size = y_train.shape
        +te_cov_size = X_test.shape
        +te_resp_size = y_test.shape
        +print("Train covariate size is: ", tr_cov_size)
        +print("Test covariate size is: ", te_cov_size)
        +print("Train response size is: ", tr_resp_size)
        +print("Test response size is: ", te_resp_size)
        +
        +
        +

        Save out each ROI to its own file:

        +

We set up the normative model so that we fit a separate model for each Y (brain region). While the estimate function in the pcntoolkit can handle having all of the Y's in a single text file, for this tutorial we are going to organize our Y's so that each one is in its own text file and directory.

        +
        os.chdir('/Users/saigerutherford/repos/PCNToolkit-demo/')
        +
        +
        +
        cd data/
        +
        +
        +
        for c in y_train.columns:
        +    y_train[c].to_csv('resp_tr_' + c + '.txt', header=False, index=False)
        +
        +
        +
        X_train.to_csv('cov_tr.txt', sep = '\t', header=False, index = False)
        +
        +
        +
        y_train.to_csv('resp_tr.txt', sep = '\t', header=False, index = False)
        +
        +
        +
        for c in y_test.columns:
        +    y_test[c].to_csv('resp_te_' + c + '.txt', header=False, index=False)
        +
        +
        +
        X_test.to_csv('cov_te.txt', sep = '\t', header=False, index = False)
        +
        +
        +
        y_test.to_csv('resp_te.txt', sep = '\t', header=False, index = False)
        +
        +
        +
        ! if [[ ! -e data/ROI_models/ ]]; then mkdir data/ROI_models; fi
        +! if [[ ! -e data/covariate_files/ ]]; then mkdir data/covariate_files; fi
        +! if [[ ! -e data/response_files/ ]]; then mkdir data/response_files; fi
        +
        +
        +
        ! for i in `cat data/roi_dir_names`; do cd data/ROI_models; mkdir ${i}; cd ../../; cp resp_tr_${i}.txt data/ROI_models/${i}/resp_tr.txt; cp resp_te_${i}.txt data/ROI_models/${i}/resp_te.txt; cp cov_tr.txt data/ROI_models/${i}/cov_tr.txt; cp cov_te.txt data/ROI_models/${i}/cov_te.txt; done
        +
        +
        +
        ! mv resp_*.txt data/response_files/
        +
        +
        +
        ! mv cov_t*.txt data/covariate_files/
        +
        +
        +
        +
        +
        +

        Step 5: Run normative model

        +
        # set this path to wherever your ROI_models folder is located (where you copied all of the covariate & response text files to in Step 4)
        +data_dir = '/Users/saigerutherford/repos/PCNToolkit-demo/data/ROI_models/'
        +
        +
        +
        # Create a list of all the ROIs you want to run a normative model for
        +roi_ids = ['lh_MeanThickness_thickness',
        +           'rh_MeanThickness_thickness',
        +           'lh_bankssts_thickness',
        +           'lh_caudalanteriorcingulate_thickness',
        +           'lh_superiorfrontal_thickness',
        +           'rh_superiorfrontal_thickness']
        +
        +
        +

When we split the data into train and test sets, we did not reset the index. This means that the row numbers in the train/test matrices are still the same as before splitting the data. We will need to know which test-set rows belong to which site in order to evaluate per-site performance metrics, so we need to reset the row numbers in the train/test split matrices.

        +
        x_col_names = ['age', 'sex', 'site_cam', 'site_hcp', 'site_ixi']
        +X_train = pd.read_csv('data/covariate_files/cov_tr.txt', sep='\t', header=None, names=x_col_names)
        +X_test = pd.read_csv('data/covariate_files/cov_te.txt', sep='\t', header=None, names=x_col_names)
        +y_train = pd.read_csv('data/response_files/resp_tr.txt', sep='\t', header=None)
        +y_test = pd.read_csv('data/response_files/resp_te.txt', sep='\t', header=None)
        +
        +
        +
        X_train.reset_index(drop=True, inplace=True)
        +X_test.reset_index(drop=True, inplace=True)
        +y_train.reset_index(drop=True, inplace=True)
        +y_test.reset_index(drop=True, inplace=True)
        +
        +
        +

        Extract site indices:

        +

Get site ids so that we can evaluate the test metrics independently for each site

        +
cam_idx = X_test.index[X_test['site_cam'] == 1].to_list()
        +hcp_idx = X_test.index[X_test['site_hcp'] == 1].to_list()
        +ixi_idx = X_test.index[X_test['site_ixi'] == 1].to_list()
        +
        +# Save the site indices into a single list
        +sites = [cam_idx, hcp_idx, ixi_idx]
        +
        +# Create a list with sites names to use in evaluating per-site metrics
        +site_names = ['cam', 'hcp', 'ixi']
        +
        +
        +

        Basis expansion:

        +

Now, we set up a B-spline basis set that allows us to perform nonlinear regression using a linear model. This basis is deliberately chosen not to be too flexible, so that it can only model relatively slowly varying trends. To increase the flexibility of the model you can change the parameterisation (e.g. by adding knot points to the B-spline basis or increasing the order of the interpolating polynomial).

        +

Note that in the neuroimaging literature, it is more common to use a polynomial basis expansion for this. Piecewise polynomials like B-splines are superior because they do not introduce global curvature. See the references below for further information.

        +

Primer on regression splines

        +

Reference for why polynomials are a bad idea
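As an optional, purely illustrative check (a sketch only, assuming numpy and create_bspline_basis have already been imported, as they are used in the code below), evaluating both kinds of basis at a few ages shows the difference: polynomial columns keep growing towards the edges of the age range, whereas each B-spline basis function is bounded and only non-zero over a limited age window.

# Illustrative sketch only: compare a global cubic polynomial basis with the
# cubic B-spline basis used below (assumes numpy as np and create_bspline_basis
# are already imported in this notebook)
example_ages = np.array([20.0, 50.0, 90.0])

# global polynomial: the columns (age, age^2, age^3) grow without bound
poly_basis = np.vstack([example_ages**d for d in range(1, 4)]).T
print('polynomial basis:\n', poly_basis)

# cubic B-spline over the same age range as below: values stay in [0, 1] and,
# for any given age, only a few basis functions are non-zero (local support)
B_demo = create_bspline_basis(10, 95)
print('B-spline basis:\n', np.array([B_demo(a) for a in example_ages]))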

        +
        # Create a cubic B-spline basis (used for regression)
+xmin = 10 # xmin & xmax are the boundaries for ages of participants in the dataset
+xmax = 95
        +B = create_bspline_basis(xmin, xmax)
        +
+# create the basis expansion for the covariates for each of the ROIs
        +for roi in roi_ids:
        +    print('Creating basis expansion for ROI:', roi)
        +    roi_dir = os.path.join(data_dir, roi)
        +    os.chdir(roi_dir)
        +
        +    # create output dir
        +    os.makedirs(os.path.join(roi_dir,'blr'), exist_ok=True)
        +
        +    # load train & test covariate data matrices
        +    X_tr = np.loadtxt(os.path.join(roi_dir, 'cov_tr.txt'))
        +    X_te = np.loadtxt(os.path.join(roi_dir, 'cov_te.txt'))
        +
        +    # add intercept column
        +    X_tr = np.concatenate((X_tr, np.ones((X_tr.shape[0],1))), axis=1)
        +    X_te = np.concatenate((X_te, np.ones((X_te.shape[0],1))), axis=1)
        +    np.savetxt(os.path.join(roi_dir, 'cov_int_tr.txt'), X_tr)
        +    np.savetxt(os.path.join(roi_dir, 'cov_int_te.txt'), X_te)
        +
        +    # create Bspline basis set
        +    Phi = np.array([B(i) for i in X_tr[:,0]])
        +    Phis = np.array([B(i) for i in X_te[:,0]])
        +    X_tr = np.concatenate((X_tr, Phi), axis=1)
        +    X_te = np.concatenate((X_te, Phis), axis=1)
        +    np.savetxt(os.path.join(roi_dir, 'cov_bspline_tr.txt'), X_tr)
        +    np.savetxt(os.path.join(roi_dir, 'cov_bspline_te.txt'), X_te)
        +
        +
        +

        Prepare output structures:

        +
        # Create pandas dataframes with header names to save out the overall and per-site model evaluation metrics
        +blr_metrics = pd.DataFrame(columns = ['ROI', 'MSLL', 'EV', 'SMSE', 'RMSE', 'Rho'])
        +blr_site_metrics = pd.DataFrame(columns = ['ROI', 'site', 'y_mean', 'y_var', 'yhat_mean', 'yhat_var', 'MSLL', 'EV', 'SMSE', 'RMSE', 'Rho'])
        +
        +
        +

        Estimate the normative models:

        +

In this step, we estimate the normative models one at a time. In principle we could also do this on the whole data matrix at once (e.g. with the response variables stored in an n_subjects x n_brain_measures numpy array; a sketch of that alternative is given below). However, doing it this way gives us some extra flexibility in that it does not require that the subjects are exactly the same for each of the brain measures.

        +
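For reference, here is a sketch of that alternative (not run in this tutorial): the multi-column covariate and response files written out in Step 4 could be passed to estimate in a single call. Note that those covariate files do not contain the B-spline basis expansion created above, so this would fit a purely linear model in age, sex and the site dummies.

# Alternative, for reference only (not run here): fit all ROIs in one call
# using the multi-column files saved in Step 4. These covariates lack the
# B-spline expansion, so the fitted trends would be purely linear.
# yhat, s2, nm, Z, metrics = estimate(
#     os.path.join(data_dir, '..', 'covariate_files', 'cov_tr.txt'),
#     os.path.join(data_dir, '..', 'response_files', 'resp_tr.txt'),
#     testcov=os.path.join(data_dir, '..', 'covariate_files', 'cov_te.txt'),
#     testresp=os.path.join(data_dir, '..', 'response_files', 'resp_te.txt'),
#     alg='blr', optimizer='powell',
#     savemodel=False, saveoutput=False, standardize=False)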

This code fragment will loop through each region of interest in the roi_ids list (set a few code blocks above), fit a normative model using Bayesian linear regression, and evaluate it on the independent test set. It will then compute error metrics such as the explained variance, mean standardized log loss and Pearson correlation between true and predicted test responses, separately for each scanning site.

        +

We supply the estimate function with a few specific arguments that are worthy of commenting on:

* alg = 'blr' : specifies we should use Bayesian linear regression
* optimizer = 'powell' : use Powell's derivative-free optimization method (faster in this case than L-BFGS)
* savemodel = False : do not write out the final estimated model to disk
* saveoutput = False : return the outputs directly rather than writing them to disk
* standardize = False : do not standardize the covariates or response variables

        +

One important consideration is whether or not to standardize. Whilst this generally only has a minor effect on the final model accuracy, it has implications for the interpretation of the models and how they are configured. If the covariates and responses are both standardized, the model will return standardized coefficients. If (as in this case) the response variables are not standardized, then the scaling of both covariates and responses will be reflected in the estimated coefficients. Also, under the linear modelling approach employed here, if the data are unstandardized and do not have zero mean, it is necessary to add an intercept column to the design matrix. This is done in the code block above.

        +
        # Loop through ROIs
        +for roi in roi_ids:
        +    print('Running ROI:', roi)
        +    roi_dir = os.path.join(data_dir, roi)
        +    os.chdir(roi_dir)
        +
+    # configure the covariates to use. Change *_bspline_* to *_int_* to fit a model without the B-spline basis expansion (i.e. a plain linear model)
        +    cov_file_tr = os.path.join(roi_dir, 'cov_bspline_tr.txt')
        +    cov_file_te = os.path.join(roi_dir, 'cov_bspline_te.txt')
        +
        +    # load train & test response files
        +    resp_file_tr = os.path.join(roi_dir, 'resp_tr.txt')
        +    resp_file_te = os.path.join(roi_dir, 'resp_te.txt')
        +
        +    # run a basic model
        +    yhat_te, s2_te, nm, Z, metrics_te = estimate(cov_file_tr,
        +                                                 resp_file_tr,
        +                                                 testresp=resp_file_te,
        +                                                 testcov=cov_file_te,
        +                                                 alg = 'blr',
        +                                                 optimizer = 'powell',
        +                                                 savemodel = False,
        +                                                 saveoutput = False,
        +                                                 standardize = False)
        +    # display and save metrics
        +    print('EV=', metrics_te['EXPV'][0])
        +    print('RHO=', metrics_te['Rho'][0])
        +    print('MSLL=', metrics_te['MSLL'][0])
        +    blr_metrics.loc[len(blr_metrics)] = [roi, metrics_te['MSLL'][0], metrics_te['EXPV'][0], metrics_te['SMSE'][0],
        +                                         metrics_te['RMSE'][0], metrics_te['Rho'][0]]
        +
        +    # Compute metrics per site in test set, save to pandas df
        +    # load true test data
        +    X_te = np.loadtxt(cov_file_te)
        +    y_te = np.loadtxt(resp_file_te)
        +    y_te = y_te[:, np.newaxis] # make sure it is a 2-d array
        +
        +    # load training data (required to compute the MSLL)
        +    y_tr = np.loadtxt(resp_file_tr)
        +    y_tr = y_tr[:, np.newaxis]
        +
        +    for num, site in enumerate(sites):
        +        y_mean_te_site = np.array([[np.mean(y_te[site])]])
        +        y_var_te_site = np.array([[np.var(y_te[site])]])
        +        yhat_mean_te_site = np.array([[np.mean(yhat_te[site])]])
        +        yhat_var_te_site = np.array([[np.var(yhat_te[site])]])
        +
        +        metrics_te_site = evaluate(y_te[site], yhat_te[site], s2_te[site], y_mean_te_site, y_var_te_site)
        +
        +        site_name = site_names[num]
        +        blr_site_metrics.loc[len(blr_site_metrics)] = [roi, site_names[num],
        +                                                       y_mean_te_site[0],
        +                                                       y_var_te_site[0],
        +                                                       yhat_mean_te_site[0],
        +                                                       yhat_var_te_site[0],
        +                                                       metrics_te_site['MSLL'][0],
        +                                                       metrics_te_site['EXPV'][0],
        +                                                       metrics_te_site['SMSE'][0],
        +                                                       metrics_te_site['RMSE'][0],
        +                                                       metrics_te_site['Rho'][0]]
        +
        +
        +
        os.chdir(data_dir)
        +
        +
        +
        # Save per site test set metrics variable to CSV file
        +blr_site_metrics.to_csv('blr_site_metrics.csv', index=False, index_label=None)
        +
        +
        +
        # Save overall test set metrics to CSV file
        +blr_metrics.to_csv('blr_metrics.csv', index=False, index_label=None)
        +
        +
        +
        +
        +

        Step 6: Interpreting model performance

        +

        Output evaluation metrics definitions

key value   Description

yhat        predictive mean
ys2         predictive variance
nm          normative model
Z           deviance scores
Rho         Pearson correlation between true and predicted responses
pRho        parametric p-value for this correlation
RMSE        root mean squared error between true/predicted responses
SMSE        standardised mean squared error
EV          explained variance
MSLL        mean standardized log loss (see page 23)
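As a minimal, optional sketch of putting these outputs to use (assuming the estimation loop above was run in the same session, so blr_site_metrics.csv has been written to data_dir and y_te, yhat_te and s2_te still hold the test data and predictions for the last fitted ROI), we can reload the per-site metrics and derive deviation (Z) scores directly from the predictive mean and variance:

# Optional sketch: assumes the estimation loop above was run in this session
# and that the working directory is still data_dir
site_metrics = pd.read_csv('blr_site_metrics.csv')

# explained variance per site, one row per ROI
print(site_metrics.pivot(index='ROI', columns='site', values='EV'))

# deviation (Z) scores for the last fitted ROI: the prediction error scaled by
# the predictive standard deviation (Gaussian predictive distribution)
Z_last_roi = (y_te.ravel() - yhat_te.ravel()) / np.sqrt(s2_te.ravel())
print('test subjects with |Z| > 2:', int(np.sum(np.abs(Z_last_roi) > 2)))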

\ No newline at end of file
diff --git a/doc/build/html/pages/updates.html b/doc/build/html/pages/updates.html
new file mode 100644
index 00000000..b7308e9e
--- /dev/null
+++ b/doc/build/html/pages/updates.html
@@ -0,0 +1,242 @@
List of updates — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Updates

\ No newline at end of file
diff --git a/doc/build/html/py-modindex.html b/doc/build/html/py-modindex.html
index 6bd7b8c2..b8194882 100644
--- a/doc/build/html/py-modindex.html
+++ b/doc/build/html/py-modindex.html
@@ -1,53 +1,193 @@
Python Module Index — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Python Module Index

Modules listed in the index: bayesreg, fileio, gp, normative, normative_parallel, rfa, trendsurf, util (previously utils)
\ No newline at end of file
diff --git a/doc/build/html/search.html b/doc/build/html/search.html
index b42bd795..86afe31a 100644
--- a/doc/build/html/search.html
+++ b/doc/build/html/search.html
@@ -1,100 +1,251 @@
Search — Predictive Clinical Neuroscience Toolkit 0.20 documentation

        Search

\ No newline at end of file
diff --git a/doc/build/html/searchindex.js b/doc/build/html/searchindex.js
index 1073c19b..8ca4b538 100644
--- a/doc/build/html/searchindex.js
+++ b/doc/build/html/searchindex.js
@@ -1 +1 @@
(regenerated Sphinx search index for the new documentation pages; minified content omitted)
\ No newline at end of file
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 00000000..54e34c46
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,15 @@
+sphinx_rtd_theme
+sphinx-gallery
+sphinx-tabs
+sphinx-automodapi
+argparse
+nibabel>=2.5.1
+six
+sklearn
+bspline
+matplotlib
+numpy>=1.19.5
+scipy>=1.3.2
+pandas>=0.25.3
+torch>=1.1.0
+pymc3>=3.11.2
diff --git a/doc/source/_static/css/functions.css b/doc/source/_static/css/functions.css
new file mode 100644
index 00000000..574685be
--- /dev/null
+++ b/doc/source/_static/css/functions.css
@@ -0,0 +1,5 @@
+.function {
+    border-bottom: 3px solid #d0d0d0;
+    padding-bottom: 10px;
+    padding-top: 10px;
+}
\ No newline at end of file
diff --git a/doc/source/_static/css/pcntoolkit.css b/doc/source/_static/css/pcntoolkit.css
new file mode 100644
index 00000000..0231067a
--- /dev/null
+++ b/doc/source/_static/css/pcntoolkit.css
@@ -0,0 +1,4 @@
(minified CSS theme styles; content truncated in this excerpt)
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li span.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content 
.fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li span.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li span.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li span.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, 
mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{co
ntent:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{co
ntent:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.
fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""
}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{cont
ent:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-l
izard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{c
ontent:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li span.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p.caption .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.btn .wy-menu-vertical li span.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p.caption .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.nav .wy-menu-vertical li span.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p.caption .btn .headerlink,.rst-content p.caption .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav 
.headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 
.fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li span.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 
.btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#F8F8F8}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#404040;background:#E1E1E1;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content 
.wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e5e9f6}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content 
.wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#1E90FF}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#e5e9f6}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#e5e9f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content 
.wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#1E90FF}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#9fefef}.wy-tray-container li.wy-tray-item-info{background:#1E90FF}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid 
rgba(0,0,0,.1);background-color:#9fefef;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#1E90FF;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#1E90FF!important}.btn-info:hover{background-color:#1E90FF!important}.btn-neutral{background-color:#e5e9f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#9fefef!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#1E90FF;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#1E90FF!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#ffffff;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#1E90FF;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#ffffff;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#1E90FF;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid 
#f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group 
.wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin 
dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #1E90FF}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#e5e9f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#9fefef}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error 
input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#9fefef}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#1E90FF}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content 
table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#e5e9f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#1E90FF;text-decoration:none;cursor:pointer}a:hover{color:#1E90FF}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica 
Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#1E90FF!important}a.wy-text-info:hover{color:#1E90FF!important}.wy-text-success{color:#9fefef!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #1E90FF;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e5e9f6;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol li,.rst-content ol.arabic li,.wy-plain-list-decimal li,article ol li{list-style:decimal;margin-left:24px}.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content ol.arabic li p:last-child,.rst-content ol.arabic li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.rst-content .wy-breadcrumbs li tt,.wy-breadcrumbs li 
.rst-content tt,.wy-breadcrumbs li code{padding:5px;border:none;background:none}.rst-content .wy-breadcrumbs li tt.literal,.wy-breadcrumbs li .rst-content tt.literal,.wy-breadcrumbs li code.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#1E90FF;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#ffffff;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#ffffff}.wy-menu-vertical li.current>a:hover span.toctree-expand,.wy-menu-vertical li.on a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical 
.toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#d9d9d9}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:93%;color:#1E90FF}.wy-menu-vertical a:hover{background-color:hsla(0,0%,100%,.8);cursor:pointer}.wy-menu-vertical a:hover 
span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#1E90FF;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#1E90FF;text-align:center;color:#ffffff}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#1E90FF}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#1E90FF;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#ffffff;font-size:168%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#1E90FF}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#1E90FF;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#ffffff}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#e7e5f6;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#1E90FF;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#1E90FF;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#ffffff;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid 
#e5e9f6;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#ffffff}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#ffffff}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#ffffff;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#1E90FF;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#9fefef;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand{color:#ffffff}.rst-versions 
.rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#ffffff}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #ffffff;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp{user-select:none;pointer-events:none}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content .code-block-caption .headerlink:after,.rst-content .toctree-wrapper>p.caption .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 
.headerlink:after,.rst-content h6 .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content .code-block-caption:hover .headerlink:after,.rst-content .toctree-wrapper>p.caption:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e5e9f6}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e5e9f6;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .hlist{width:100%}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl dt span.classifier:before{content:" : "}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list 
kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e5e9f6}html.writer-html5 .rst-content table.docutils th{border:1px solid #e5e9f6}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#1E90FF}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7e5f6;color:#1E90FF;border-top:3px solid #1E90FF;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#1E90FF}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code,html.writer-html4 .rst-content dl:not(.docutils) tt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#ef9fe4;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid 
#1E90FF;background:#e5e9f6;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} pre.literal-block{border:1px solid #ffffff;white-space:pre;margin:1px 0 24px 0;padding:12px 12px;overflow-x:auto;background:#e7e5f6;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.5;display:block;color:#404040} \ No newline at end of file diff --git a/doc/source/_static/css/pcntoolkit_nomaxwidth.css b/doc/source/_static/css/pcntoolkit_nomaxwidth.css new file mode 100644 index 00000000..fef75932 --- /dev/null +++ b/doc/source/_static/css/pcntoolkit_nomaxwidth.css @@ -0,0 +1,8 @@ + +.wy-nav-content { + max-width: none; +} + + +@media screen and (max-width:768px){.tablet-hide{display:none}} +@media screen and (max-width:480px){.mobile-hide{display:none}} diff --git a/doc/source/_static/css/pcntoolkit_tabs.css b/doc/source/_static/css/pcntoolkit_tabs.css new file mode 100644 index 00000000..db063843 --- /dev/null +++ b/doc/source/_static/css/pcntoolkit_tabs.css @@ -0,0 +1,44 @@ +.sphinx-tabs { + margin-bottom: 2em; +} + +.sphinx-tabs:last-child { + margin-bottom: 1em; +} + +.sphinx-tabs .sphinx-menu .item p { + margin: 0; +} + +.sphinx-tabs .sphinx-menu a.item { + color: #1E90FF !important; +} + +.sphinx-tabs .sphinx-menu { + border-bottom-color: #1E90FF !important; + display: flex; + flex-direction: row; + flex-wrap: wrap; +} + +.sphinx-tabs .sphinx-menu a.active.item { + border-color: #1E90FF !important; +} + +.sphinx-tab { + border-color: #1E90FF !important; + box-sizing: border-box; +} + +.sphinx-tab.tab.active { + margin-bottom: 0; +} + +/* Code tabs don't need the code-block border */ +.code-tab.tab { + padding: 0.4em !important; +} + +.code-tab.tab div[class^='highlight'] { + border: none; +} diff --git a/doc/source/_static/pcn-logo.png 
b/doc/source/_static/pcn-logo.png new file mode 100644 index 00000000..f08cca9b Binary files /dev/null and b/doc/source/_static/pcn-logo.png differ diff --git a/doc/source/_templates/class.rst b/doc/source/_templates/class.rst new file mode 100644 index 00000000..e9885ddb --- /dev/null +++ b/doc/source/_templates/class.rst @@ -0,0 +1,12 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :no-members: + :no-inherited-members: + +.. raw:: html + +
        \ No newline at end of file diff --git a/doc/source/_templates/function.rst b/doc/source/_templates/function.rst new file mode 100644 index 00000000..4e6d1428 --- /dev/null +++ b/doc/source/_templates/function.rst @@ -0,0 +1,12 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autofunction:: {{ objname }} + +.. .. include:: modules/{{ module }}.{{ objname }}.examples + +.. raw:: html + +
        \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py index 49209107..c60f1375 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -18,7 +18,14 @@ # import os import sys +import sphinx_rtd_theme sys.path.insert(0, os.path.abspath('../../pcntoolkit')) +sys.path.insert(0, os.path.abspath('../../pcntoolkit/dataio')) +sys.path.insert(0, os.path.abspath('../../pcntoolkit/model')) +sys.path.insert(0, os.path.abspath('../../pcntoolkit/normative_model')) +sys.path.insert(0, os.path.abspath('../../pcntoolkit/utils')) + + # -- General configuration ------------------------------------------------ @@ -29,15 +36,32 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.imgmath', - 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', -] +extensions = ['sphinx_tabs.tabs', + 'sphinx.ext.autodoc', + 'sphinx.ext.imgmath', + 'sphinx.ext.githubpages', + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.autosummary', + 'sphinx_automodapi.automodapi', + #'sphinx.ext.doctest', + #'sphinx.ext.intersphinx', + #'sphinx.ext.mathjax', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + #'sphinxarg.ext', + ] + +autosummary_generate = True +autodoc_default_options = {'members': True, 'inherited-members': True} +numpydoc_show_class_members = False +autoclass_content = "class" -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + +highlight_language ='none' # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: @@ -55,66 +79,22 @@ # General information about the project. project = u'Predictive Clinical Neuroscience Toolkit' copyright = u'2020, Andre F. Marquand' -author = u'Andre F. Marquand' +author = u'Saige Rutherford, Andre Marquand' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'0.17' +version = u'0.20' # The full version, including alpha/beta/rc tags. -release = u'0.17' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# -# today = '' -# -# Else, today_fmt is used as the format for a strftime call. -# -# today_fmt = '%B %d, %Y' +release = u'0.20' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] -# The reST default role (used for this markup: `text`) to use for all -# documents. -# -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-# -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -124,220 +104,35 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'bizstyle' +html_theme = 'sphinx_rtd_theme' +html_theme_options = { 'style_nav_header_background': '#1E90FF'} -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -# -# html_title = u'Spatial methods for neuroimaging v0.1' - -# A shorter title for the navigation bar. Default is the same as html_title. -# -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# -# html_logo = None +pygments_style = 'sphinx' -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# -# html_favicon = None +# Add any paths that contain templates here, relative to this directory. +templates_path = ['pages/_templates'] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# -# html_extra_path = [] - -# If not None, a 'Last updated on:' timestamp is inserted at every page -# bottom, using the given strftime format. -# The empty string is equivalent to '%b %d, %Y'. -# -# html_last_updated_fmt = None +# These paths are either relative to html_static_path +# or fully qualified paths (eg. https://...) +html_css_files = ['pages/css/pcntoolkit.css', + 'pages/css/pcntoolkit_nomaxwidth.css'] -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# -# html_additional_pages = {} - -# If false, no module index is generated. -# -# html_domain_indices = True - -# If false, no index is generated. -# -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# -# html_split_index = False - -# If true, links to the reST sources are added to the pages. 
-# -# html_show_sourcelink = True +# add custom files that are stored in _static +def setup(app): + app.add_css_file('pages/css/pcntoolkit_tabs.css') -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -# -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -# -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# -# html_search_scorer = 'scorer.js' +# add logo +html_logo = "pcn-logo.png" +html_theme_options = { + 'logo_only': True, + 'display_version': False, +} # Output file base name for HTML help builder. htmlhelp_basename = 'PCNtoolkitdoc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'PCNtoolkitdoc.tex', u'PCNtoolkit documentation', - u'Andre F. Marquand', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# -# latex_use_parts = False - -# If true, show page references after internal links. -# -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# -# latex_appendices = [] - -# It false, will not define \strong, \code, itleref, \crossref ... but only -# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added -# packages. -# -# latex_keep_old_macro_names = True - -# If false, no module index is generated. -# -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'PCNtoolkitdoc', u'PCNtoolkit documentation', - [author], 1) -] - -# If true, show URL addresses after external links. 
-# -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'PCNtoolkitdoc', u'PCNtoolkit documentationn', - author, 'PCNtoolkitdoc', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# -# texinfo_appendices = [] - -# If false, no module index is generated. -# -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# -# texinfo_no_detailmenu = False diff --git a/doc/source/index.rst b/doc/source/index.rst index ac5f5872..0e08e020 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,22 +1,50 @@ -.. Spatial methods for neuroimaging documentation master file, created by - sphinx-quickstart on Tue Aug 23 15:22:22 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. Predictive Clinical Neuroscience toolkit ======================================== -Contents: .. toctree:: - :maxdepth: 2 + :maxdepth: 1 + :caption: Getting started + + pages/installation.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Background + + pages/pcntoolkit_background.rst +.. toctree:: + :maxdepth: 1 + :caption: Function & Class Docs + modindex.rst -Indices and tables -================== +.. toctree:: + :maxdepth: 1 + :caption: Current Events + + pages/updates.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Tutorials + + pages/tutorial_CPC2020.rst + pages/tutorial_ROIcorticalthickness.rst + pages/tutorial_HBR.rst + -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` +.. toctree:: + :maxdepth: 1 + :caption: Other Useful Stuff + pages/FAQs.rst + pages/glossary.rst + pages/citing.rst + pages/references.rst + pages/acknowledgements.rst + diff --git a/doc/source/modindex.rst b/doc/source/modindex.rst index b44c2ac5..dd73caf6 100644 --- a/doc/source/modindex.rst +++ b/doc/source/modindex.rst @@ -43,8 +43,8 @@ Module Index :inherited-members: :show-inheritance: -.. automodule:: utils +.. 
automodule:: util :members: :undoc-members: :inherited-members: - :show-inheritance: \ No newline at end of file + :show-inheritance: diff --git a/doc/source/pages/FAQs.rst b/doc/source/pages/FAQs.rst new file mode 100644 index 00000000..3f846e76 --- /dev/null +++ b/doc/source/pages/FAQs.rst @@ -0,0 +1,3 @@ +Frequently Asked Questions +==================================== + diff --git a/doc/source/pages/_static/css/functions.css b/doc/source/pages/_static/css/functions.css new file mode 100644 index 00000000..574685be --- /dev/null +++ b/doc/source/pages/_static/css/functions.css @@ -0,0 +1,5 @@ +.function { + border-bottom: 3px solid #d0d0d0; + padding-bottom: 10px; + padding-top: 10px; +} \ No newline at end of file diff --git a/doc/source/pages/_static/css/pcntoolkit.css b/doc/source/pages/_static/css/pcntoolkit.css new file mode 100644 index 00000000..0231067a --- /dev/null +++ b/doc/source/pages/_static/css/pcntoolkit.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media 
print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li span.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li span.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download 
span.fa-pull-right:first-child,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li span.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li span.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li span.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vime
o-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-sl
ash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:bef
ore{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content
:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li span.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content a .admonition-title,.rst-content code.download a 
span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li span.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p.caption .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.btn .wy-menu-vertical li span.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p.caption .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.nav .wy-menu-vertical li span.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p.caption .btn .headerlink,.rst-content p.caption .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.wy-menu-vertical li.current>a .nav 
span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn 
.rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li span.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption 
.btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#F8F8F8}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#404040;background:#E1E1E1;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note 
.wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e5e9f6}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error 
.admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#1E90FF}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#e5e9f6}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#e5e9f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content 
.wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#1E90FF}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#9fefef}.wy-tray-container li.wy-tray-item-info{background:#1E90FF}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#9fefef;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 
rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#1E90FF;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#1E90FF!important}.btn-info:hover{background-color:#1E90FF!important}.btn-neutral{background-color:#e5e9f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#9fefef!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#1E90FF;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#1E90FF!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#ffffff;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#1E90FF;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#ffffff;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#1E90FF;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked 
select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group 
.wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto 
#1E90FF}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#e5e9f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#9fefef}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error 
input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#9fefef}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#1E90FF}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table 
caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#e5e9f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#1E90FF;text-decoration:none;cursor:pointer}a:hover{color:#1E90FF}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica 
Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#1E90FF!important}a.wy-text-info:hover{color:#1E90FF!important}.wy-text-success{color:#9fefef!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #1E90FF;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e5e9f6;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol li,.rst-content ol.arabic li,.wy-plain-list-decimal li,article ol li{list-style:decimal;margin-left:24px}.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content ol.arabic li p:last-child,.rst-content ol.arabic li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.rst-content .wy-breadcrumbs li tt,.wy-breadcrumbs li 
.rst-content tt,.wy-breadcrumbs li code{padding:5px;border:none;background:none}.rst-content .wy-breadcrumbs li tt.literal,.wy-breadcrumbs li .rst-content tt.literal,.wy-breadcrumbs li code.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#1E90FF;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#ffffff;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#ffffff}.wy-menu-vertical li.current>a:hover span.toctree-expand,.wy-menu-vertical li.on a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical 
.toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover span.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover span.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#d9d9d9}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:93%;color:#1E90FF}.wy-menu-vertical a:hover{background-color:hsla(0,0%,100%,.8);cursor:pointer}.wy-menu-vertical a:hover 
span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#1E90FF;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#1E90FF;text-align:center;color:#ffffff}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#1E90FF}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#1E90FF;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#ffffff;font-size:168%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#1E90FF}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#1E90FF;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#ffffff}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#e7e5f6;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#1E90FF;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#1E90FF;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#ffffff;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid 
#e5e9f6;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#ffffff}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#ffffff}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#ffffff;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#1E90FF;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#9fefef;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand{color:#ffffff}.rst-versions 
.rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#ffffff}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #ffffff;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp{user-select:none;pointer-events:none}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content .code-block-caption .headerlink:after,.rst-content .toctree-wrapper>p.caption .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 
.headerlink:after,.rst-content h6 .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content .code-block-caption:hover .headerlink:after,.rst-content .toctree-wrapper>p.caption:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e5e9f6}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e5e9f6;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .hlist{width:100%}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl dt span.classifier:before{content:" : "}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list 
kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e5e9f6}html.writer-html5 .rst-content table.docutils th{border:1px solid #e5e9f6}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#1E90FF}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7e5f6;color:#1E90FF;border-top:3px solid #1E90FF;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#1E90FF}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code,html.writer-html4 .rst-content dl:not(.docutils) tt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#ef9fe4;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid 
#1E90FF;background:#e5e9f6;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} pre.literal-block{border:1px solid #ffffff;white-space:pre;margin:1px 0 24px 0;padding:12px 12px;overflow-x:auto;background:#e7e5f6;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.5;display:block;color:#404040} \ No newline at end of file diff --git a/doc/source/pages/_static/css/pcntoolkit_nomaxwidth.css b/doc/source/pages/_static/css/pcntoolkit_nomaxwidth.css new file mode 100644 index 00000000..fef75932 --- /dev/null +++ b/doc/source/pages/_static/css/pcntoolkit_nomaxwidth.css @@ -0,0 +1,8 @@ + +.wy-nav-content { + max-width: none; +} + + +@media screen and (max-width:768px){.tablet-hide{display:none}} +@media screen and (max-width:480px){.mobile-hide{display:none}} diff --git a/doc/source/pages/_static/css/pcntoolkit_tabs.css b/doc/source/pages/_static/css/pcntoolkit_tabs.css new file mode 100644 index 00000000..db063843 --- /dev/null +++ b/doc/source/pages/_static/css/pcntoolkit_tabs.css @@ -0,0 +1,44 @@ +.sphinx-tabs { + margin-bottom: 2em; +} + +.sphinx-tabs:last-child { + margin-bottom: 1em; +} + +.sphinx-tabs .sphinx-menu .item p { + margin: 0; +} + +.sphinx-tabs .sphinx-menu a.item { + color: #1E90FF !important; +} + +.sphinx-tabs .sphinx-menu { + border-bottom-color: #1E90FF !important; + display: flex; + flex-direction: row; + flex-wrap: wrap; +} + +.sphinx-tabs .sphinx-menu a.active.item { + border-color: #1E90FF !important; +} + +.sphinx-tab { + border-color: #1E90FF !important; + box-sizing: border-box; +} + +.sphinx-tab.tab.active { + margin-bottom: 0; +} + +/* Code tabs don't need the code-block border */ +.code-tab.tab { + padding: 0.4em !important; +} + +.code-tab.tab div[class^='highlight'] { + border: none; +} diff --git 
a/doc/source/pages/_static/pcn-logo.png b/doc/source/pages/_static/pcn-logo.png new file mode 100644 index 00000000..f08cca9b Binary files /dev/null and b/doc/source/pages/_static/pcn-logo.png differ diff --git a/doc/source/pages/acknowledgements.rst b/doc/source/pages/acknowledgements.rst new file mode 100644 index 00000000..84d9b871 --- /dev/null +++ b/doc/source/pages/acknowledgements.rst @@ -0,0 +1,14 @@ +Acknowledgements +================== + +We gratefully acknowledge funding from the Dutch Organisation for Scientific Research (NWO), via a Vernieuwingsimpuls VIDI fellowship, from the UK Wellcome Trust via a Digital Innovator grant and from the UK Medical Research Council via an Experimental Medicine Challenge Grant. + +Core developers of the toolbox are: + +- Andre Marquand +- Seyed Mostafa Kia +- Thomas Wolfers +- Saige Rutherford +- Richard Dinga +- Mariam Zabihi +- Charlotte Fraza diff --git a/doc/source/pages/citing.rst b/doc/source/pages/citing.rst new file mode 100644 index 00000000..fa9930d3 --- /dev/null +++ b/doc/source/pages/citing.rst @@ -0,0 +1,31 @@ +How to cite PCNtoolkit +==================================== + +If you use the PCNtoolkit, please consider citing some of the following work: + + +Marquand, A. F., Wolfers, T., Mennes, M., Buitelaar, J., & Beckmann, C. F. (2016). Beyond Lumping and Splitting: A Review of Computational Approaches for Stratifying Psychiatric Disorders. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 1(5), 433–447. https://doi.org/10.1016/j.bpsc.2016.04.002 + +Marquand, A. F., Rezek, I., Buitelaar, J., & Beckmann, C. F. (2016). Understanding Heterogeneity in Clinical Cohorts Using Normative Models: Beyond Case-Control Studies. Biological Psychiatry, 80(7), 552–561. https://doi.org/10.1016/j.biopsych.2015.12.023 + +Marquand, A. F., Kia, S. M., Zabihi, M., Wolfers, T., Buitelaar, J. K., & Beckmann, C. F. (2019). Conceptualizing mental disorders as deviations from normative functioning. Molecular Psychiatry, 24(10), 1415–1424. https://doi.org/10.1038/s41380-019-0441-1 + +Marquand, A. F., Haak, K. V., & Beckmann, C. F. (2017). Functional corticostriatal connection topographies predict goal directed behaviour in humans. Nature Human Behaviour, 1(8). https://doi.org/10.1038/s41562-017-0146 + +Wolfers, T., Beckmann, C. F., Hoogman, M., Buitelaar, J. K., Franke, B., & Marquand, A. F. (2020). Individual differences v. the average patient: Mapping the heterogeneity in ADHD using normative models. Psychological Medicine, 50(2), 314–323. https://doi.org/10.1017/S0033291719000084 + +Wolfers, T., Rokicki, J., Alnæs, D., Berthet, P., Agartz, I., Kia, S. M., Kaufmann, T., Zabihi, M., Moberget, T., Melle, I., Beckmann, C. F., Andreassen, O. A., Marquand, A. F., & Westlye, L. T. (n.d.). Replicating extensive brain structural heterogeneity in individuals with schizophrenia and bipolar disorder. Human Brain Mapping, n/a(n/a). https://doi.org/10.1002/hbm.25386 + +Zabihi, M., Floris, D. L., Kia, S. M., Wolfers, T., Tillmann, J., Arenas, A. L., Moessnang, C., Banaschewski, T., Holt, R., Baron-Cohen, S., Loth, E., Charman, T., Bourgeron, T., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. (2020). Fractionating autism based on neuroanatomical normative modeling. Translational Psychiatry, 10(1), 1–10. 
https://doi.org/10.1038/s41398-020-01057-0 + +Zabihi, M., Oldehinkel, M., Wolfers, T., Frouin, V., Goyard, D., Loth, E., Charman, T., Tillmann, J., Banaschewski, T., Dumas, G., Holt, R., Baron-Cohen, S., Durston, S., Bölte, S., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. F. (2019). Dissecting the Heterogeneous Cortical Anatomy of Autism Spectrum Disorder Using Normative Models. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 4(6), 567–578. https://doi.org/10.1016/j.bpsc.2018.11.013 + +Kia, S. M., & Marquand, A. (2018). Normative Modeling of Neuroimaging Data using Scalable Multi-Task Gaussian Processes. ArXiv:1806.01047 [Cs, Stat]. http://arxiv.org/abs/1806.01047 + +Kia, S. M., Beckmann, C. F., & Marquand, A. F. (2018). Scalable Multi-Task Gaussian Process Tensor Regression for Normative Modeling of Structured Variation in Neuroimaging Data. ArXiv:1808.00036 [Cs, Stat]. http://arxiv.org/abs/1808.00036 + +Kia, S. M., Huijsdens, H., Dinga, R., Wolfers, T., Mennes, M., Andreassen, O. A., Westlye, L. T., Beckmann, C. F., & Marquand, A. F. (2020). Hierarchical Bayesian Regression for Multi-site Normative Modeling of Neuroimaging Data. In A. L. Martel, P. Abolmaesumi, D. Stoyanov, D. Mateus, M. A. Zuluaga, S. K. Zhou, D. Racoceanu, & L. Joskowicz (Eds.), Medical Image Computing and Computer Assisted Intervention – MICCAI 2020 (pp. 699–709). Springer International Publishing. https://doi.org/10.1007/978-3-030-59728-3_68 + +Huertas, I., Oldehinkel, M., van Oort, E. S. B., Garcia-Solis, D., Mir, P., Beckmann, C. F., & Marquand, A. F. (2017). A Bayesian spatial model for neuroimaging data based on biologically informed basis functions. NeuroImage, 161, 134–148. https://doi.org/10.1016/j.neuroimage.2017.08.009 + +Fraza, C. J., Dinga, R., Beckmann, C. F., & Marquand, A. F. (2021). Warped Bayesian Linear Regression for Normative Modelling of Big Data. BioRxiv, 2021.04.05.438429. https://doi.org/10.1101/2021.04.05.438429 diff --git a/doc/source/pages/glossary.rst b/doc/source/pages/glossary.rst new file mode 100644 index 00000000..4ed67d03 --- /dev/null +++ b/doc/source/pages/glossary.rst @@ -0,0 +1,2 @@ +Glossary +=========== diff --git a/doc/source/pages/installation.rst b/doc/source/pages/installation.rst new file mode 100644 index 00000000..98c5c9fb --- /dev/null +++ b/doc/source/pages/installation.rst @@ -0,0 +1,100 @@ +Installation +================== + +Basic installation (on a local machine) +******************************************* + +1. Install anaconda3 + +2. Create enviornment + +.. code-block:: bash + + conda create --name + +3. Activate environment + +.. code-block:: bash + + source activate + +4. Install required conda packages + +.. code-block:: bash + + conda install pip pandas scipy + +5. Install PCNtoolkit (plus dependencies) + +.. code-block:: bash + + pip install pcntoolkit + +Alternative installation (on a shared resource) +************************************************** + +1. Make sure conda is available on the system. Otherwise install it first from https://www.anaconda.com/ + +.. code-block:: bash + + conda --version + + +2. Create a conda environment in a shared location + +.. code-block:: bash + + conda create -y python==3.7.7 numpy mkl blas --prefix=/shared/conda/ + + +3. Activate the conda environment + +.. code-block:: bash + + conda activate /shared/conda/ + + +4. Install other dependencies + +.. code-block:: bash + + conda install -y pandas scipy + + +5. Install pip dependencies + +.. 
code-block:: bash + + pip --no-cache-dir install nibabel sklearn torch glob3 + + +6. Clone the repo + +.. code-block:: bash + + git clone https://github.com/amarquand/PCNtoolkit.git + + +7. Install in the conda environment + +.. code-block:: bash + + cd PCNtoolkit/ + python3 setup.py install + + +8. Test + +.. code-block:: bash + + python -c "import pcntoolkit as pk;print(pk.__file__)" + + +Quickstart usage +**************************** + +For normative modelling, functionality is handled by the ``normative.py`` script, which can be run from the command line, e.g. + +.. code-block:: bash + + python normative.py -c /path/to/training/covariates -t /path/to/test/covariates -r /path/to/test/response/variables /path/to/my/training/response/variables diff --git a/doc/source/pages/modindex.rst b/doc/source/pages/modindex.rst new file mode 100644 index 00000000..b44c2ac5 --- /dev/null +++ b/doc/source/pages/modindex.rst @@ -0,0 +1,50 @@ +Module Index +************ + +.. automodule:: bayesreg + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: gp + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: normative + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: normative_parallel + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: trendsurf + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: rfa + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: fileio + :members: + :undoc-members: + :inherited-members: + :show-inheritance: + +.. automodule:: utils + :members: + :undoc-members: + :inherited-members: + :show-inheritance: \ No newline at end of file diff --git a/doc/source/pages/nm_concept.png b/doc/source/pages/nm_concept.png new file mode 100644 index 00000000..367090c9 Binary files /dev/null and b/doc/source/pages/nm_concept.png differ diff --git a/doc/source/pages/nm_overview.png b/doc/source/pages/nm_overview.png new file mode 100644 index 00000000..b1d794d0 Binary files /dev/null and b/doc/source/pages/nm_overview.png differ diff --git a/doc/source/pages/nm_parallel.png b/doc/source/pages/nm_parallel.png new file mode 100644 index 00000000..8066968c Binary files /dev/null and b/doc/source/pages/nm_parallel.png differ diff --git a/doc/source/pages/nm_plot.jpeg b/doc/source/pages/nm_plot.jpeg new file mode 100644 index 00000000..009dff14 Binary files /dev/null and b/doc/source/pages/nm_plot.jpeg differ diff --git a/doc/source/pages/pcntoolkit_background.rst b/doc/source/pages/pcntoolkit_background.rst new file mode 100644 index 00000000..4f398e24 --- /dev/null +++ b/doc/source/pages/pcntoolkit_background.rst @@ -0,0 +1,178 @@ +PCNtoolkit Background +==================================== + +What is the PCNtoolkit? +**************************** + +Predictive Clinical Neuroscience (PCN) toolkit (formerly nispat) is a python package designed for multi-purpose tasks in clinical neuroimaging, including normative modelling, trend surface modelling in addition to providing implementations of a number of fundamental machine learning algorithms. + +Intro to normative modelling +=============================== + +Normative modelling essentially aims to predict centiles of variance in a response variable (e.g. a region of interest or other neuroimaging-derived measure) on the basis of a set of covariates (e.g. 
age, clinical scores, diagnosis) A conceptual overview of the approach can be found in this `publication `_. For example, the image below shows an example of a normative model that aims to predict vertex-wise cortical thickness data, essentially fitting a separate model for each vertex. + +.. figure:: ./nm_concept.png + :height: 300px + :align: center + +In practice, this is done by regressing the biological response variables against a set of clinical or demographic covariates. In the instructions that follow, it is helpful to think of these as being stored in matrices as shown below: + +.. figure:: ./nm_overview.png + :height: 300px + :align: center + +There are many options for this, but techniques that provide a distributional form for the centiles are appealing, since they help to estimate extreme centiles more efficiently. Bayesian methods are also beneficial in this regard because they also allow separation of modelling uncertainty from variation in the data. Many applications of normative modelling use Gaussian Process Regression, which is the default method in this toolkit. Typically (but not `always `_), each response variable is estimated independently. + +Data formats +**************************** + +Generally the covariates are specified in text format, roughly following the FSL convention in that the text file should contain one entry +(i.e. subject) per line, with columns space or tab separated and no headers. For example: + +.. code-block:: bash + + head cov.txt + 52 55 94 4.6 + 49 43 59 4.6 + 56 80 63 5.6 + 39 48 42 4.3 + + +For the response variables, the following data formats are supported: + +* NIfTI (e.g. .nii.gz or .img/.hdr) +* CIFTI (e.g. .dtseries.nii) +* Pickle/pandas (e.g. .pkl) +* ASCII text (e.g. .txt, .csv, .tsv) + +For nifti/cifti formats, data should be in timeseries format with subjects along the time dimension and these images will be masked and reshaped into vectors. If no mask is specified, one will be created automatically from the image data. + +Basic usage (command line) +**************************** + +The simplest method to estimate a normative model is using the ``normative.py`` script which can be run from the command line or imported as a python module. For example, the following command will estimate a normative model on the basis of the matrix of covariates and responses specified in cov.txt and resp.txt respectively. These are simply tab or space separated ASCII text files that contain the variables of interest, with one subject per row. + +.. code-block:: bash + + python normative.py -c cov.txt -k 5 -a blr resp.txt + + +The argument ``-a blr`` tells the script to use Bayesian Linear regression rather than the default Gaussian process regression model and ``-k 5`` tells the script to run internal 5-fold cross-validation across all subjects in the covariates and responses files. Alternatively, the model can be evaluated on a separate dataset by specifying test covariates (and optionally also test responses). 
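+For example, a call of the following form (the file names here are only placeholders) fits the model on the training data and then evaluates it on a held-out test set, following the same command-line syntax shown in the quickstart:
+
+.. code-block:: bash
+
+    python normative.py -c cov_train.txt -t cov_test.txt -r resp_test.txt -a blr resp_train.txt
+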
+The following estimation algorithms are supported + +**Table 1: Estimation algorithms** + +================= ================================= ============================================================================================= +**key value** **Description** **Reference** +----------------- --------------------------------- --------------------------------------------------------------------------------------------- +hbr Hierarchical Bayesian Regression `Kia et al 2020 `_ +blr Bayesian Linear Regression `Huertas et al 2017 `_ +np Neural Processes `Kia et al 2018 `_ +rfa Random Feature Approximation `Rahimi and Recht 2007 `_ +================= ================================= ============================================================================================= + + +Note that keyword arguments can also be specified from the command line to offer additional flexibility. For example, the following command will fit a normative model to the same data, but without standardizing the data first and additionally writing out model coefficients (this is not done by default because they can use a lot of disk space). + +.. code-block:: bash + + python normative.py -c cov.txt -k 5 -a blr resp.txt standardize=False savemodel=True + + +A full set of keyword arguments is provided in the table below. At a minimum, a set of responses and covariates must be provided and either the corresponding number of cross-validation folds or a set of test covariates. + +**Table 2: Keywords and command line arguments** + +============ ========================= ========================================================================================== +**Keyword** **Command line shortcut** **Description** +------------ ------------------------- ------------------------------------------------------------------------------------------ +covfunc -c filename Covariate file +cvfolds -k num_folds Number of cross-validation folds +testcov -t filename Test covariates +testresp -r filename Test responses +maskfile -m filename mask to apply to the response variables (nifti/cifti only) +alg -a algorithm Estimation algorithm: 'gpr' (default), 'blr', 'np', 'hbr' or 'rfa'. See table above. +function -f function function to call (estimate, predict, transfer, extend). See below +standardize -s (skip) Standardize the covariates and response variables using the training data +configparam -x config Pass the value of config to the estimation algorithm (deprecated) +outputsuffix Suffix to apply to the output variables +saveoutput Write output (default = True) +savemodel Save the model coefficients and meta-data (default = False) +warp Warping function to apply to the responses (blr only) +============ ========================= ========================================================================================== + +Basic usage (scripted) +**************************** + +The same can be done by importing the estimate function from ``normative.py``. For example, the following code snippet will: (i) mask the nifti data specified in resp_train.nii.gz using the mask specified (which must have the same voxel size as the response variables) (ii) fit a linear normative model to each voxel, (iii) apply this to make predictions using the test covariates and (iv) compute deviation scores and error metrics by comparing against the true test response variables. + +.. 
code-block:: python
+
+    from pcntoolkit.normative import estimate
+
+    # estimate a normative model
+    estimate("cov_train.txt", "resp_train.nii.gz", maskfile="mask.nii.gz", \
+             testresp="resp_test.nii.gz", testcov="cov_test.txt", alg="blr")
+
+
+The estimate function does all these operations in a single step. In some cases it may be desirable to separate these steps. For example, if a normative model has been estimated on a large dataset, it may be desirable to save the model before applying it to a new dataset (e.g. from a different site). The following code snippet will first fit a model, then apply it to a set of dummy covariates so that the normative model can be plotted:
+
+.. code-block:: python
+
+    from pcntoolkit.normative import estimate, predict
+
+    # fit a normative model, using training covariates and responses
+    # then apply to test dataset. Saved with file suffix '_estimate'
+    estimate(cov_file_tr, resp_file_tr, testresp=resp_file_te, \
+             testcov=cov_file_te, alg='blr', optimizer = 'powell', \
+             savemodel=True, standardize = False)
+
+    # make predictions on a set of dummy covariates (with no responses)
+    # Saved with file suffix '_predict'
+    yhat, s2 = predict(cov_file_dummy)
+
+For further information, see the `developer documentation `_. The same can be achieved from the command line, using the ``-f`` argument, for example, by specifying ``-f predict``.
+
+Parallelising estimation to speed things up
+**********************************************
+
+Normative model estimation is typically quite computationally expensive, especially for large datasets. This is exacerbated by high-resolution data (e.g. voxelwise data). For such cases normative model estimation can be parallelised across multiple compute nodes, which can be achieved using the ``normative_parallel.py`` script. This involves splitting the response matrix into a set of batches, each of a specified size, i.e.:
+
+.. figure:: ./nm_parallel.png
+   :height: 300px
+   :align: center
+
+Each of these is then submitted to a cluster and reassembled once the cluster jobs have been completed. The following code snippet illustrates this procedure:
+
+.. code-block:: python
+
+    from pcntoolkit.normative_parallel import execute_nm, collect_nm, delete_nm
+
+    # General config parameters
+    normative_path = '//pcntoolkit/normative.py'
+    python_path = '//bin/python'
+    working_dir = '//'
+    log_dir = '//'
+
+    # cluster parameters
+    job_name = 'nm_demo'     # name for the cluster job
+    batch_size = 10          # number of models (e.g. voxels) per batch
+    memory = '4gb'           # memory required
+    duration = '01:00:00'    # walltime
+    cluster = 'torque'
+
+    # fit the model. Specifying binary=True means results will be stored in .pkl format
+    execute_nm(working_dir, python_path, normative_path, job_name, 'cov_file.txt', \
+               'resp_file.pkl', batch_size, memory, duration, cluster_spec=cluster, \
+               cv_folds=2, log_path=log_dir, binary=True)
+
+    # wait until jobs complete ...
+
+    # reassemble results
+    collect_nm(working_dir, job_name, collect=True, binary=True)
+
+    # remove temporary files
+    delete_nm(working_dir, binary=True)
+
+
+At the present time, only ASCII and pickle formats are supported by ``normative_parallel``. Note also that it may be necessary to customise the script to support your local cluster architecture. This can be done using fairly obvious modifications to the ``execute_nm()`` function.
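+
+For example, voxelwise NIfTI responses first need to be reshaped to subjects x voxels and written out in one of these formats. The snippet below is a minimal sketch of one way to do this with nibabel and pandas (the file names are placeholders, and it assumes a 4D image with subjects stacked along the fourth dimension, as described in the data formats section above):
+
+.. code-block:: python
+
+    import nibabel as nib
+    import pandas as pd
+
+    # load a 4D image with subjects stacked along the 4th dimension
+    img = nib.load('resp_file.nii.gz')
+    mask = nib.load('mask.nii.gz').get_fdata().astype(bool)
+
+    # reshape to subjects x voxels, keeping only voxels inside the mask
+    data = img.get_fdata()[mask].T
+
+    # write a pickled DataFrame that can be passed to execute_nm(..., binary=True)
+    pd.DataFrame(data).to_pickle('resp_file.pkl')
+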
\ No newline at end of file diff --git a/doc/source/pages/references.rst b/doc/source/pages/references.rst new file mode 100644 index 00000000..20905003 --- /dev/null +++ b/doc/source/pages/references.rst @@ -0,0 +1,32 @@ +.. _referencelist: + +.. title:: List of references + +References +================== + +Marquand, A. F., Wolfers, T., Mennes, M., Buitelaar, J., & Beckmann, C. F. (2016). Beyond Lumping and Splitting: A Review of Computational Approaches for Stratifying Psychiatric Disorders. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 1(5), 433–447. https://doi.org/10.1016/j.bpsc.2016.04.002 + +Marquand, A. F., Rezek, I., Buitelaar, J., & Beckmann, C. F. (2016). Understanding Heterogeneity in Clinical Cohorts Using Normative Models: Beyond Case-Control Studies. Biological Psychiatry, 80(7), 552–561. https://doi.org/10.1016/j.biopsych.2015.12.023 + +Marquand, A. F., Kia, S. M., Zabihi, M., Wolfers, T., Buitelaar, J. K., & Beckmann, C. F. (2019). Conceptualizing mental disorders as deviations from normative functioning. Molecular Psychiatry, 24(10), 1415–1424. https://doi.org/10.1038/s41380-019-0441-1 + +Marquand, A. F., Haak, K. V., & Beckmann, C. F. (2017). Functional corticostriatal connection topographies predict goal directed behaviour in humans. Nature Human Behaviour, 1(8). https://doi.org/10.1038/s41562-017-0146 + +Wolfers, T., Beckmann, C. F., Hoogman, M., Buitelaar, J. K., Franke, B., & Marquand, A. F. (2020). Individual differences v. the average patient: Mapping the heterogeneity in ADHD using normative models. Psychological Medicine, 50(2), 314–323. https://doi.org/10.1017/S0033291719000084 + +Wolfers, T., Rokicki, J., Alnæs, D., Berthet, P., Agartz, I., Kia, S. M., Kaufmann, T., Zabihi, M., Moberget, T., Melle, I., Beckmann, C. F., Andreassen, O. A., Marquand, A. F., & Westlye, L. T. (n.d.). Replicating extensive brain structural heterogeneity in individuals with schizophrenia and bipolar disorder. Human Brain Mapping, n/a(n/a). https://doi.org/10.1002/hbm.25386 + +Zabihi, M., Floris, D. L., Kia, S. M., Wolfers, T., Tillmann, J., Arenas, A. L., Moessnang, C., Banaschewski, T., Holt, R., Baron-Cohen, S., Loth, E., Charman, T., Bourgeron, T., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. (2020). Fractionating autism based on neuroanatomical normative modeling. Translational Psychiatry, 10(1), 1–10. https://doi.org/10.1038/s41398-020-01057-0 + +Zabihi, M., Oldehinkel, M., Wolfers, T., Frouin, V., Goyard, D., Loth, E., Charman, T., Tillmann, J., Banaschewski, T., Dumas, G., Holt, R., Baron-Cohen, S., Durston, S., Bölte, S., Murphy, D., Ecker, C., Buitelaar, J. K., Beckmann, C. F., & Marquand, A. F. (2019). Dissecting the Heterogeneous Cortical Anatomy of Autism Spectrum Disorder Using Normative Models. Biological Psychiatry: Cognitive Neuroscience and Neuroimaging, 4(6), 567–578. https://doi.org/10.1016/j.bpsc.2018.11.013 + +Kia, S. M., & Marquand, A. (2018). Normative Modeling of Neuroimaging Data using Scalable Multi-Task Gaussian Processes. ArXiv:1806.01047 [Cs, Stat]. http://arxiv.org/abs/1806.01047 + +Kia, S. M., Beckmann, C. F., & Marquand, A. F. (2018). Scalable Multi-Task Gaussian Process Tensor Regression for Normative Modeling of Structured Variation in Neuroimaging Data. ArXiv:1808.00036 [Cs, Stat]. http://arxiv.org/abs/1808.00036 + +Kia, S. M., Huijsdens, H., Dinga, R., Wolfers, T., Mennes, M., Andreassen, O. A., Westlye, L. T., Beckmann, C. F., & Marquand, A. F. (2020). 
Hierarchical Bayesian Regression for Multi-site Normative Modeling of Neuroimaging Data. In A. L. Martel, P. Abolmaesumi, D. Stoyanov, D. Mateus, M. A. Zuluaga, S. K. Zhou, D. Racoceanu, & L. Joskowicz (Eds.), Medical Image Computing and Computer Assisted Intervention – MICCAI 2020 (pp. 699–709). Springer International Publishing. https://doi.org/10.1007/978-3-030-59728-3_68 + +Huertas, I., Oldehinkel, M., van Oort, E. S. B., Garcia-Solis, D., Mir, P., Beckmann, C. F., & Marquand, A. F. (2017). A Bayesian spatial model for neuroimaging data based on biologically informed basis functions. NeuroImage, 161, 134–148. https://doi.org/10.1016/j.neuroimage.2017.08.009 + +Fraza, C. J., Dinga, R., Beckmann, C. F., & Marquand, A. F. (2021). Warped Bayesian Linear Regression for Normative Modelling of Big Data. BioRxiv, 2021.04.05.438429. https://doi.org/10.1101/2021.04.05.438429 diff --git a/doc/source/pages/tutorial_CPC2020.rst b/doc/source/pages/tutorial_CPC2020.rst new file mode 100644 index 00000000..7ac969d8 --- /dev/null +++ b/doc/source/pages/tutorial_CPC2020.rst @@ -0,0 +1,427 @@ +Gaussian Process Regression +============================== + +Created by `Saige Rutherford `_, `Thomas Wolfers `_, `Mariam Zabihi `_ + +View on `GitHub `_ + +Run in `Google Colab `_ + +\______________________________________________________________________________\_ + +Background Story +**************************** + +Morten and Ingrid are concerned about the health of their father, +Nordan. He recently turned 65 years. A few months ago he could not find +his way home. Together, they visit a neurologist/psychiatrist to conduct +a number of cognitive tests. However, those tests were inconclusive. +While Nordan has a relatively low IQ it could not explain his trouble +returning home. + +Recently, the family heard about a new screening technique called +normative modeling with which one can place individuals in reference to +a population norm on for instance measures such as brain volume. Nordan +would like to undertake this procedure to better know what is going on +and to potentially find targets for treatment. Therefore, the family +booked an appointment with you, the normative modeling specialist. To +find out what is going on you compare Nordan’s hyppocampus to the norm +and to a group of persons with Dementia disorders, who have a similar +IQ, age as well as the same sex as Nordan. + +Do your best to get as far as you can. However, you do not need to feel +bad if you cannot complete everything during the tutorial. + +Task 0: Load data and install the pcntoolkit +***************************************************** + +.. code:: ipython3 + + #install normative modeling + !pip install pcntoolkit + +**Option 1:** Connect your Google Drive account, and load data from +Google Drive. Having Google Drive connected will allow you to save any +files created back to your Drive folder. This step will require you to +download the csv files from +`Github `__ +to your computer, and then make a folder in your Google Drive account +and upload the csv files to this folder. + +.. code:: ipython3 + + from google.colab import drive + drive.mount('/content/drive') + + #change dir to data on your google drive + import os + os.chdir('drive/My Drive/name-of-folder-where-you-uploaded-csv-files-from-Github/') #Change this path to match the path to your data in Google Drive + + # code by T. Wolfers + +**Option 2:** Import the files directly from Github, and skip adding +them to Google Drive. + +.. 
code:: ipython3 + + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_demographics.csv + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_demographics_nordan.csv + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_features.csv + !wget -nc https://raw.githubusercontent.com/saigerutherford/CPC_2020/master/data/cpc_camcan_features_nordan.csv + + # code by S. Rutherford + 
+TASK 1: Format input data +********************************* + 
+You have four files: the features and demographics files for the +norm sample, and two files of the same name for Nordan, your test sample. +As one of your coworkers has done the preprocessing and quality control, +there are more subjects in the demographics file than in the features +file of the norm sample. Please select the overlap of participants +between those two files. + 
+*Question for your understanding:* + 
+1) Why do we have to select the overlap between participants in terms of + features and demographics? + 
+.. code:: ipython3 + + import pandas as pd + + # read in the files. + norm_demographics = pd.read_csv('cpc_camcan_demographics.csv', + sep= ",", + index_col = 0) + norm_features = pd.read_csv('cpc_camcan_features.csv', + sep=",", + index_col = 0) + + # check columns through print [there are other better options] + print(norm_demographics) + print(norm_features) + + # find overlap in terms of participants between norm_sample_features and + # norm_sample_demographics + + norm_demographics_features = pd.concat([norm_demographics, norm_features], + axis = 1, + join = 'inner') # inner checks overlap + # outer combines + print(norm_demographics_features) + + # code by T. Wolfers + 
+TASK 2: Prepare the covariate_normsample and testresponse_normsample file. +********************************************************************************** + 
+As mentioned in the introductory presentation, those files need a +specific format and the entries need to be separated by spaces. Use +whatever method you know to prepare those files based on the data +provided in TASK 1. Save those files in .txt format in your drive. Also +get rid of the column names and participant IDs. + 
+Given that we only have limited time in this practical, we have to make a +selection for the features based on your prior knowledge. With the +information in mind that Nordan does not remember his way home, which +subfield of the hippocampus is probably a good target for the +investigations? Select a maximum of four hippocampal regions as +features. + 
+NOTE: Normative modeling is a screening tool; we only make this selection +due to time constraints. In reality, we build these models on millions of +putative biomarkers that are not restricted to brain imaging. + 
+*Questions for your understanding:* + 
+2) What is the requirement for the features in terms of variable + properties (e.g. dichotomous or continuous)? +3) What is the requirement for the covariates in terms of these properties? +4) What are the requirements for both together? +5) How does this depend on the algorithm used? + +.. 
code:: ipython3 + + # prepare covariate_normsample for sex and age + covariate_normsample = norm_demographics_features[['sex', + 'age']] + + covariate_normsample.to_csv('covariate_normsample.txt', + sep = ' ', + header = False, + index = False) + + # prepare features_normsample for relevant hippocampal subfields + features_normsample = norm_demographics_features[['left_CA1', + 'left_CA3', + 'right_CA1', + 'right_CA3']] + + features_normsample.to_csv('features_normsample.txt', + sep = ' ', + header = False, + index = False) + + # code by T. Wolfers + 
+TASK 3: Estimate normative model +*************************************** + 
+Once you have prepared and saved all the necessary files, look at the +pcntoolkit for running normative modeling. Select an appropriate method, +set up the toolkit and run your analyses using 2-fold cross-validation +in the normsample. Change the output suffix from estimate to ’_2fold’. + 
+HINT: You primarily need the estimate function. + 
+SUGGESTION: While this process is running you can go on to TASK 4; +you will have no doubt when it is running correctly. + 
+*Questions for your understanding:* + 
+6) What does cvfolds mean and why do we use it? +7) What is the output of the estimate function and what does it mean? + 
+.. code:: ipython3 + + import pcntoolkit as pcn + + # run normative modeling using 2-fold cross-validation + + pcn.normative.estimate(covfile = 'covariate_normsample.txt', + respfile = 'features_normsample.txt', + cvfolds = 2, + alg = 'gpr', + outputsuffix = '_2fold') + + # code by T. Wolfers + 
+TASK 4: Estimate the forward model of the normative model +***************************************************************** + 
+In order to visualize the normative trajectories, you first need to run +the forward model. To this end you need to set up an appropriate +covariate_forwardmodel file that covers the age range appropriately for +both sexes. Save this file as .txt. Then you can input the files you +made in TASK 1 as well as the file you made now and run the forward +model using the appropriate specifications. + 
+*Questions for your understanding:* + 
+8) What are yhat and ys2? +9) Why does the output of the forward model not include the Z-scores? + 
+.. code:: ipython3 + + # create covariate_forwardmodel.txt file + covariate_forwardmodel = {'sex': [0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1], + 'age': [20, 30, 40, 50, 60, 70, 80, + 20, 30, 40, 50, 60, 70, 80]} + covariate_forwardmodel = pd.DataFrame(data=covariate_forwardmodel) + + covariate_forwardmodel.to_csv('covariate_forwardmodel.txt', + sep = ' ', + header = False, + index = False) + + # estimate forward model + pcn.normative.estimate(covfile = 'covariate_normsample.txt', + respfile = 'features_normsample.txt', + testcov = 'covariate_forwardmodel.txt', + cvfolds = None, + alg = 'gpr', + outputsuffix = '_forward') + + # code by T. Wolfers + 
+TASK 5: Visualize forward model +*************************************** + 
+Visualize the forward model of the normative model similar to the figure +below. + 
+.. figure:: nm_plot.jpeg + 
+HINT: First create a function that calculates the confidence intervals +and then plot yhat, ys2 of the forward model. Finally, plot the data of +individual participants. + +.. 
code:: ipython3 + + import numpy as np + import matplotlib.pyplot as plt + + # confidence interval calculation at x_forward + def confidence_interval(s2,x,z): + CI=np.zeros((len(x_forward),4)) + for i,xdot in enumerate(x_forward): + ci_inx=np.isin(x,xdot) + S2=s2[ci_inx] + S_hat=np.mean(S2,axis=0) + n=S2.shape[0] + CI[i,:]=z*np.power(S_hat/n,.5) + return CI + + + feature_names=['left_CA1','left_CA3','right_CA1','right_CA3'] + sex_covariates=[ 'Female','Male'] + # Creating plots for Female and Male + for i,sex in enumerate(sex_covariates): + #forward model data + forward_yhat = pd.read_csv('yhat_forward.txt', sep = ' ', header=None) + yhat_forward=forward_yhat.values + yhat_forward=yhat_forward[7*i:7*(i+1)] + x_forward=[20, 30, 40, 50, 60, 70, 80] + + # Find the index of the data exclusively for one sex. Female:0, Male: 1 + inx=np.where(covariate_normsample.sex==i)[0] + x=covariate_normsample.values[inx,1] + # actual data + y = pd.read_csv('features_normsample.txt', sep = ' ', header=None) + y=y.values[inx] + # confidence interval yhat +/- z*(std/n^.5) --> 95% CI: z=1.96, 99% CI: z=2.58 + s2= pd.read_csv('ys2_2fold.txt', sep = ' ', header=None) + s2=s2.values[inx] + + CI_95=confidence_interval(s2,x,1.96) + CI_99=confidence_interval(s2,x,2.58) + + # Create a trajectory for each feature + for j,name in enumerate(feature_names): + fig=plt.figure() + ax=fig.add_subplot(111) + ax.plot(x_forward,yhat_forward[:,j], linewidth=4, label='Normative trajectory') + + + ax.plot(x_forward,CI_95[:,j]+yhat_forward[:,j], linewidth=2,linestyle='--',c='g', label='95% confidence interval') + ax.plot(x_forward,-CI_95[:,j]+yhat_forward[:,j], linewidth=2,linestyle='--',c='g') + + ax.plot(x_forward,CI_99[:,j]+yhat_forward[:,j], linewidth=1,linestyle='--',c='k', label='99% confidence interval') + ax.plot(x_forward,-CI_99[:,j]+yhat_forward[:,j], linewidth=1,linestyle='--',c='k') + + ax.scatter(x,y[:,j],c='r', label=name) + plt.legend(loc='upper left') + plt.title('Normative trajectory of '+name+' in '+sex+' cohort') + plt.show() + plt.close() + + # code by M. Zabihi + 
+TASK 6: Apply the normative model to Nordan’s data and the dementia patients. +************************************************************************************ + 
+.. code:: ipython3 + + # read in Nordan's as well as the patients' demographics and features + demographics_nordan = pd.read_csv('cpc_camcan_demographics_nordan.csv', + sep= ",", + index_col = 0) + features_nordan = pd.read_csv('cpc_camcan_features_nordan.csv', + sep=",", + index_col = 0) + + # create a covariate file for Nordan's as well as the patients' demographics + covariate_nordan = demographics_nordan[['sex', + 'age']] + covariate_nordan.to_csv('covariate_nordan.txt', + sep = ' ', + header = False, + index = False) + + # create the corresponding feature file + features_nordan = features_nordan[['left_CA1', + 'left_CA3', + 'right_CA1', + 'right_CA3']] + + features_nordan.to_csv('features_nordan.txt', + sep = ' ', + header = False, + index = False) + + # apply normative modeling + pcn.normative.estimate(covfile = 'covariate_normsample.txt', + respfile = 'features_normsample.txt', + testcov = 'covariate_nordan.txt', + testresp = 'features_nordan.txt', + cvfolds = None, + alg = 'gpr', + outputsuffix = '_nordan') + + # code by T. Wolfers + 
+TASK 7: In which hippocampal subfield(s) does Nordan deviate extremely? 
+******************************************************************************* + 
+No coding necessary; just create a presentation which includes +recommendations to Nordan and his family. Use i) \|Z\| > 3.6 and ii) \|Z\| > +1.96 as definitions for extreme normative deviations. + 
+TASK 8 (OPTIONAL): Implement a function that calculates percentage change. +********************************************************************************** + 
+Percentage change = :math:`\frac{x_1 - x_2}{|x_2|} \times 100` + 
+.. code:: ipython3 + + # function that calculates percentage change + def calculate_percentage_change(x1, x2): + percentage_change = ((x1 - x2) / abs(x2)) * 100 + return percentage_change + + # code by T. Wolfers + 
+TASK 9 (OPTIONAL): Visualize percent change +**************************************************** + 
+Plot the percentage change in Yhat of the forward model in reference to +age 20. Do that for both sexes separately. + 
+.. code:: ipython3 + + import matplotlib.pyplot as plt + + forward_yhat = pd.read_csv('yhat_forward.txt', sep = ' ', header=None) + + # You can indicate here which hippocampal subfield you would like to visualize + hyppocampal_subfield = 0 + + percentage_change_female = [] + percentage_change_male = [] + count = 0 + lengths = len(forward_yhat[hyppocampal_subfield]) + for entry in forward_yhat[hyppocampal_subfield]: + if count > 0 and count < 7: + loop_percentage_change_female = calculate_percentage_change(entry, + forward_yhat.iloc[0, + hyppocampal_subfield]) + percentage_change_female.append(loop_percentage_change_female) + elif count > 7: + loop_percentage_change_male = calculate_percentage_change(entry, + forward_yhat.iloc[7, + hyppocampal_subfield]) + percentage_change_male.append(loop_percentage_change_male) + count = count + 1 + + names = ['30 compared to 20 years', + '40 compared to 20 years', + '50 compared to 20 years', + '60 compared to 20 years', + '70 compared to 20 years', + '80 compared to 20 years'] + + # females + plt.subplot(121) + plt.bar(names, percentage_change_female) + plt.xticks(rotation=90) + plt.ylim(-20, 2) + + # males + plt.subplot(122) + plt.bar(names, percentage_change_male) + plt.xticks(rotation=90) + plt.ylim(-20, 2) + + # code by T. Wolfers diff --git a/doc/source/pages/tutorial_HBR.rst b/doc/source/pages/tutorial_HBR.rst new file mode 100644 index 00000000..dd67ee33 --- /dev/null +++ b/doc/source/pages/tutorial_HBR.rst @@ -0,0 +1,339 @@ +Hierarchical Bayesian Regression +====================================================================================== + 
+Hierarchical Bayesian Regression Normative Modelling and Transfer onto unseen sites. + 
+This notebook will go through basic data preparation (training and +testing set, `see Saige’s +tutorial `__ +on Normative Modelling for more detail), the actual training of the +models, and will finally describe how to transfer the trained models +onto unseen sites. The approach is described in detail in these papers: + 
+- `Kia et al 2020 `_. +- `Kia et al 2021 `_. + 
+View on `GitHub `_ + 
+While we run everything on a single compute node here, for larger datasets it is probably desirable to parallelize this using the normative_parallel functionality. + 
+Run in `Google Colab `_ + + +Created by `Saige Rutherford `__, adapted/edited by Andre Marquand and Pierre Berthet + + +.. container:: + 
+Step 0: Install necessary libraries & grab data files +******************************************************* + + +.. code:: ipython3 + + ! pip install numpy scipy arviz pymc3 matplotlib pandas + ! 
pip uninstall -y Theano-PyMC # conflicts with Theano on some environments + ! pip install pcntoolkit==0.19 + 
+For this tutorial we will use data from the `Functional Connectome +Project FCON1000 `__ to create a +multi-site dataset. + 
+The dataset contains some cortical measures (e.g. thickness), processed by +Freesurfer 6.0, and some covariates (e.g. age, site, gender). + 
+First we import the required packages and create a working directory. + 
+.. code:: ipython3 + + import os + import pandas as pd + import pcntoolkit as ptk + import numpy as np + import pickle + from matplotlib import pyplot as plt + 
+.. code:: ipython3 + + processing_dir = "HBR_demo/" # replace with a path to your working directory + if not os.path.isdir(processing_dir): + os.makedirs(processing_dir) + os.chdir(processing_dir) + processing_dir = os.getcwd() + 
+Overview +^^^^^^^^ + 
+Here we get the FCON dataset, remove the ICBM site for later transfer, +assign a numeric site id to the different scanner sites and print an overview +of the left hemisphere mean raw cortical thickness as a function of age, +color coded by the various sites: + 
+.. code:: ipython3 + + fcon = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000.csv') + + icbm = fcon.loc[fcon['site'] == 'ICBM'] + icbm['sitenum'] = 0 + fcon = fcon.loc[fcon['site'] != 'ICBM'] + + sites = fcon['site'].unique() + fcon['sitenum'] = 0 + + f, ax = plt.subplots(figsize=(12, 12)) + + for i,s in enumerate(sites): + idx = fcon['site'] == s + fcon['sitenum'].loc[idx] = i + + print('site',s, sum(idx)) + ax.scatter(fcon['age'].loc[idx], fcon['lh_MeanThickness_thickness'].loc[idx]) + + ax.legend(sites) + ax.set_ylabel('LH mean cortical thickness [mm]') + ax.set_xlabel('age') + + 
+Step 1: Prepare training and testing sets +****************************************** + 
+Then we randomly split the samples (participants) in half, to be either +in the training or in the testing set. We do this for the remaining +FCON dataset and for the ICBM data. The transfer function will also +require a training and a test sample. + 
+The number of samples per site used for training and for testing is +then displayed. + 
+.. code:: ipython3 + + tr = np.random.uniform(size=fcon.shape[0]) > 0.5 + te = ~tr + + fcon_tr = fcon.loc[tr] + fcon_te = fcon.loc[te] + + tr = np.random.uniform(size=icbm.shape[0]) > 0.5 + te = ~tr + + icbm_tr = icbm.loc[tr] + icbm_te = icbm.loc[te] + + print('sample size check') + for i,s in enumerate(sites): + idx = fcon_tr['site'] == s + idxte = fcon_te['site'] == s + print(i,s, sum(idx), sum(idxte)) + + # Uncomment the following lines if you want to keep a defined version of the sets + # fcon_tr.to_csv('/Users/andmar/data/sairut/data/fcon1000_tr.csv') + # fcon_te.to_csv('/Users/andmar/data/sairut/data/fcon1000_te.csv') + # icbm_tr.to_csv('/Users/andmar/data/sairut/data/fcon1000_icbm_tr.csv') + # icbm_te.to_csv('/Users/andmar/data/sairut/data/fcon1000_icbm_te.csv') + 
+Otherwise you can just load these predefined subsets: + +.. 
code:: ipython3 + + # Optional + fcon_tr = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_tr.csv') + fcon_te = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_te.csv') + icbm_tr = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_icbm_tr.csv') + icbm_te = pd.read_csv('https://raw.githubusercontent.com/predictive-clinical-neuroscience/PCNtoolkit-demo/main/data/fcon1000_icbm_te.csv') + +Step 2: Configure HBR inputs: covariates, measures and batch effects +********************************************************************* + +We will here only use the mean cortical thickness for the Right and Left +hemisphere: two idps. + +.. code:: ipython3 + + idps = ['rh_MeanThickness_thickness','lh_MeanThickness_thickness'] + +As input to the model, we need covariates (used to describe predictable +source of variability (fixed effects), here ‘age’), measures (here +cortical thickness on two idps), and batch effects (random source of +variability, here ‘scanner site’ and ‘sex’). + +``X`` corresponds to the covariate(s) + +``Y`` to the measure(s) + +``batch_effects`` to the random effects + +We need these values both for the training (``_train``) and for the +testing set (``_test``). + +.. code:: ipython3 + + X_train = (fcon_tr['age']/100).to_numpy(dtype=float) + Y_train = fcon_tr[idps].to_numpy(dtype=float) + batch_effects_train = fcon_tr[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_train.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_train), file) + with open('Y_train.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_train), file) + with open('trbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_train), file) + + + X_test = (fcon_te['age']/100).to_numpy(dtype=float) + Y_test = fcon_te[idps].to_numpy(dtype=float) + batch_effects_test = fcon_te[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_test.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_test), file) + with open('Y_test.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_test), file) + with open('tsbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_test), file) + + # a simple function to quickly load pickle files + def ldpkl(filename: str): + with open(filename, 'rb') as f: + return pickle.load(f) + +Step 3: Files and Folders grooming +*************************************** + +.. 
code:: ipython3 + + respfile = os.path.join(processing_dir, 'Y_train.pkl') # measurements (e.g. cortical thickness) of the training samples (columns: the various features/ROIs, rows: observations or subjects) + covfile = os.path.join(processing_dir, 'X_train.pkl') # covariates (e.g. age) of the training samples (columns: covariates, rows: observations or subjects) + + testrespfile_path = os.path.join(processing_dir, 'Y_test.pkl') # measurements for the testing samples + testcovfile_path = os.path.join(processing_dir, 'X_test.pkl') # covariate file for the testing samples + + trbefile = os.path.join(processing_dir, 'trbefile.pkl') # training batch effects file (e.g. scanner_id, gender) (columns: the various batch effects, rows: observations or subjects) + tsbefile = os.path.join(processing_dir, 'tsbefile.pkl') # testing batch effects file + + output_path = os.path.join(processing_dir, 'Models/') # output path, where the models will be written + log_dir = os.path.join(processing_dir, 'log/') # path for the log files + if not os.path.isdir(output_path): + os.mkdir(output_path) + if not os.path.isdir(log_dir): + os.mkdir(log_dir) + + outputsuffix = '_estimate' # a string to name the output files, of use only to you, so adapt it for your needs. + 
+Step 4: Estimating the models +****************************** + 
+Now we have everything ready to estimate the normative models. The +``estimate`` function only needs the training and testing sets, each +divided into three datasets: covariates, measures and batch effects. We +specify ``alg=hbr`` to use the hierarchical Bayesian +regression method, which is well suited for multi-site datasets. The +remaining arguments are basic data management: where the models, logs, +and output files will be written and how they will be named. + 
+.. code:: ipython3 + + ptk.normative.estimate(covfile=covfile, + respfile=respfile, + tsbefile=tsbefile, + trbefile=trbefile, + alg='hbr', + log_path=log_dir, + binary=True, + output_path=output_path, testcov= testcovfile_path, + testresp = testrespfile_path, + outputsuffix=outputsuffix, savemodel=True) + 
+At this point some analyses can be done; there are also some error metrics that +could be of interest. This is covered in step 6 and in `Saige’s +tutorial `__ +on Normative Modelling. + 
+Step 5: Transferring the models to unseen sites +************************************************* + 
+Similarly to what was done before for the FCON data, we also need to +prepare the ICBM-specific data in order to run the transfer function: +training and testing sets of covariates, measures and batch effects: + +.. 
code:: ipython3 + + X_adapt = (icbm_tr['age']/100).to_numpy(dtype=float) + Y_adapt = icbm_tr[idps].to_numpy(dtype=float) + batch_effects_adapt = icbm_tr[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_adaptation.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_adapt), file) + with open('Y_adaptation.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_adapt), file) + with open('adbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_adapt), file) + + # Test data (new dataset) + X_test_txfr = (icbm_te['age']/100).to_numpy(dtype=float) + Y_test_txfr = icbm_te[idps].to_numpy(dtype=float) + batch_effects_test_txfr = icbm_te[['sitenum','sex']].to_numpy(dtype=int) + + with open('X_test_txfr.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(X_test_txfr), file) + with open('Y_test_txfr.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(Y_test_txfr), file) + with open('txbefile.pkl', 'wb') as file: + pickle.dump(pd.DataFrame(batch_effects_test_txfr), file) + + +.. code:: ipython3 + + respfile = os.path.join(processing_dir, 'Y_adaptation.pkl') + covfile = os.path.join(processing_dir, 'X_adaptation.pkl') + testrespfile_path = os.path.join(processing_dir, 'Y_test_txfr.pkl') + testcovfile_path = os.path.join(processing_dir, 'X_test_txfr.pkl') + trbefile = os.path.join(processing_dir, 'adbefile.pkl') + tsbefile = os.path.join(processing_dir, 'txbefile.pkl') + + log_dir = os.path.join(processing_dir, 'log_transfer/') + output_path = os.path.join(processing_dir, 'Transfer/') + model_path = os.path.join(processing_dir, 'Models/') # path to the previously trained models + outputsuffix = '_transfer' # suffix added to the output files from the transfer function + +Here, the difference is that the transfer function needs a model path, +which points to the models we just trained, and new site data (training +and testing). That is basically the only difference. + +.. code:: ipython3 + + yhat, s2, z_scores = ptk.normative.transfer(covfile=covfile, + respfile=respfile, + tsbefile=tsbefile, + trbefile=trbefile, + model_path = model_path, + alg='hbr', + log_path=log_dir, + binary=True, + output_path=output_path, + testcov= testcovfile_path, + testresp = testrespfile_path, + outputsuffix=outputsuffix, + savemodel=True) + + +And that is it, you now have models that benefited from prior knowledge +about different scanner sites to learn on unseen sites. 
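+
+As a quick, optional sanity check (a minimal sketch; it assumes ``z_scores`` is
+returned as an array with one row per test subject and one column per IDP, in the
+same order as ``idps``), you could count how many ICBM test subjects show large
+deviations:
+
+.. code:: ipython3
+
+    # count extreme deviations per IDP in the transferred (ICBM) test set
+    # assumes z_scores is an array of shape (n_test_subjects, n_idps)
+    for i, idp in enumerate(idps):
+        n_extreme = np.sum(np.abs(z_scores[:, i]) > 1.96)
+        print(idp, ':', n_extreme, 'of', z_scores.shape[0], 'subjects with |Z| > 1.96')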
+ +Step 6: Interpreting model performance +***************************************** + +Output evaluation metrics definitions + +================= ====================================================================================================== +**key value** **Description** +----------------- ------------------------------------------------------------------------------------------------------ +yhat predictive mean +ys2 predictive variance +nm normative model +Z deviance scores +Rho Pearson correlation between true and predicted responses +pRho parametric p-value for this correlation +RMSE root mean squared error between true/predicted responses +SMSE standardised mean squared error +EV explained variance +MSLL mean standardized log loss `See page 23 `_ +================= ====================================================================================================== diff --git a/doc/source/pages/tutorial_ROIcorticalthickness.rst b/doc/source/pages/tutorial_ROIcorticalthickness.rst new file mode 100644 index 00000000..38ac31e8 --- /dev/null +++ b/doc/source/pages/tutorial_ROIcorticalthickness.rst @@ -0,0 +1,724 @@ +Bayesian Linear Regression +============================ + +Normative Modeling Tutorial Using Multi-Site Cortical Thickness Data and Bayesian Linear Regression. + +This notebook will prepare the data for normative modelling (assembling +data matrices from different datasets, preparing the covariates etc). + +View on `GitHub `_ + +Run in `Google Colab `_ + +Created by `Saige Rutherford `__ + +.. raw:: html + +
        + +Step 0: Install necessary libraries & grab data files +******************************************************* + +.. code:: ipython3 + + ! git clone https://github.com/predictive-clinical-neuroscience/PCNtoolkit-demo.git + +.. code:: ipython3 + + import os + +.. code:: ipython3 + + # set this path to the git cloned PCNtoolkit-demo repository --> Uncomment whichever line you need for either running on your own computer or on Google Colab. + #os.chdir('/Users/saigerutherford/repos/PCNtoolkit-demo/') # if running on your own computer, use this line (but obvi change the path) + #os.chdir('PCNtoolkit-demo/') # if running on Google Colab, use this line + +.. code:: ipython3 + + ! pip install -r requirements.txt + + +Step 1: Prepare covariate data +******************************** + +For this tutorial we will use data from the `Human Connectome Project +Young Adult +study `__, +`CAMCAN `__, and +`IXI `__ to create a +multi-site dataset. + +Our first step is to prepare and combine the covariate (age & sex) data +from each site. + +.. code:: ipython3 + + import pandas as pd + import numpy as np + import matplotlib.pyplot as plt + import seaborn as sns + import joypy + from sklearn.model_selection import train_test_split + from pcntoolkit.normative import estimate, evaluate + from pcntoolkit.utils import create_bspline_basis, compute_MSLL + +.. code:: ipython3 + + hcp = pd.read_csv('data/HCP1200_age_gender.csv') + cam = pd.read_csv('data/cam_age_gender.csv') + ixi = pd.read_csv('data/IXI_age_gender.csv') + +.. code:: ipython3 + + cam_hcp = pd.merge(hcp, cam, how='outer') + +.. code:: ipython3 + + cov = pd.merge(cam_hcp, ixi, how='outer') + +.. code:: ipython3 + + sns.set(font_scale=1.5, style='darkgrid') + +.. code:: ipython3 + + sns.displot(cov, x="age", hue="site", multiple="stack", height=6) + +.. code:: ipython3 + + cov.groupby(['site']).describe() + +Step 2: Prepare brain data +****************************** + +Next we will format and combine the MRI data. We are using cortical +thickness maps that are created by running recon-all from Freesurfer 6. +We need to merge together the left and right hemisphere text files for +each site, and then combine the different sites into a single dataframe. +We reduce the dimensionality of our data by using ROIs from the +Desikan-Killiany atlas. + +Here is some psuedo-code (run from a terminal in the folder that has all +subject’s recon-all output folders) that was used to extract these ROIs: + +.. code:: ipython3 + + export SUBJECTS_DIR=/path/to/study/freesurfer_data/ + aparcstats2table --subject sub-* --hemi lh --meas thickness --tablefile HCP1200_aparc_lh_thickness.txt + aparcstats2table --subject sub-* --hemi rh --meas thickness --tablefile HCP1200_aparc_rh_thickness.txt + +.. code:: ipython3 + + cam = pd.read_csv('data/CAMCAN_aparc_thickness.csv') + hcpya = pd.read_csv('data/HCP1200_aparc_thickness.csv') + ixi = pd.read_csv('data/IXI_aparc_thickness.csv') + +.. code:: ipython3 + + hcpya_cam = pd.merge(hcpya, cam, how='outer') + +.. code:: ipython3 + + brain_all = pd.merge(ixi, hcpya_cam, how='outer') + +We also want to include the `Euler +number `__ as a +covariate. So we extracted the euler number from each subject’s +recon-all output folder into a text file and we now need to format and +combine these into our brain dataframe. + +.. code:: ipython3 + + hcp_euler = pd.read_csv('data/hcp-ya_euler.csv') + cam_euler = pd.read_csv('data/cam_euler.csv') + ixi_euler = pd.read_csv('data/ixi_euler.csv') + +.. 
code:: ipython3 + + hcp_euler['site'] = 'hcp' + cam_euler['site'] = 'cam' + ixi_euler['site'] = 'ixi' + +.. code:: ipython3 + + hcp_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True) + cam_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True) + ixi_euler.replace(r'^\s*$', np.nan, regex=True, inplace=True) + +.. code:: ipython3 + + hcp_euler.dropna(inplace=True) + cam_euler.dropna(inplace=True) + ixi_euler.dropna(inplace=True) + +.. code:: ipython3 + + hcp_euler['rh_euler'] = hcp_euler['rh_euler'].astype(int) + hcp_euler['lh_euler'] = hcp_euler['lh_euler'].astype(int) + cam_euler['rh_euler'] = cam_euler['rh_euler'].astype(int) + cam_euler['lh_euler'] = cam_euler['lh_euler'].astype(int) + ixi_euler['rh_euler'] = ixi_euler['rh_euler'].astype(int) + ixi_euler['lh_euler'] = ixi_euler['lh_euler'].astype(int) + +.. code:: ipython3 + + hcp_cam_euler = pd.merge(hcp_euler, cam_euler, how='outer') + +.. code:: ipython3 + + df_euler = pd.merge(ixi_euler, hcp_cam_euler, how='outer') + +Finally, we need to center the euler number for each site. The euler +number is very site-specific so in order to use the same exclusion +threshold across sites we need to center the site by subtracting the +site median from all subjects at a site. Then we will take the square +root and multiply by negative one and exclude any subjects with a square +root above 10. This choice of threshold is fairly random. If possible +all of your data should be visually inspected to verify that the data +inclusion is not too strict or too lenient. + +.. code:: ipython3 + + df_euler['avg_euler'] = df_euler[['lh_euler','rh_euler']].mean(axis=1) + +.. code:: ipython3 + + df_euler.groupby(by='site').median() + +.. code:: ipython3 + + df_euler['site_median'] = df_euler['site'] + +.. code:: ipython3 + + df_euler['site_median'] = df_euler['site_median'].replace({'hcp':-43,'cam':-61,'ixi':-56}) + +.. code:: ipython3 + + df_euler['avg_euler_centered'] = df_euler['avg_euler'] - df_euler['site_median'] + +.. code:: ipython3 + + df_euler['avg_euler_centered_neg'] = df_euler['avg_euler_centered']*-1 + +.. code:: ipython3 + + df_euler['avg_euler_centered_neg_sqrt'] = np.sqrt(np.absolute(df_euler['avg_euler_centered_neg'])) + +.. code:: ipython3 + + pd.set_option('display.max_rows', 500) + pd.set_option('display.max_columns', 500) + pd.set_option('display.width', 1000) + #create a color gradent function to be used in the colormap parameter + def color_gradient(x=0.0, start=(0, 0, 0), stop=(1, 1, 1)): + r = np.interp(x, [0, 1], [start[0], stop[0]]) + g = np.interp(x, [0, 1], [start[1], stop[1]]) + b = np.interp(x, [0, 1], [start[2], stop[2]]) + return r, g, b#show the table + #plot the figure + plt.figure(dpi=380) + fig, axes = joypy.joyplot(df_euler, column=['avg_euler_centered_neg_sqrt'], overlap=2.5, by="site", ylim='own', fill=True, figsize=(6,6) + , legend=False, xlabels=True, ylabels=True, colormap=lambda x: color_gradient(x, start=(.08, .45, .8),stop=(.8, .34, .44)) + , alpha=0.6, linewidth=.5, linecolor='w', fade=True) + plt.title('sqrt(-Euler Number), median centered', fontsize=18, color='black', alpha=1) + plt.xlabel('sqrt(-Euler number)', fontsize=14, color='black', alpha=1) + plt.ylabel('Site', fontsize=14, color='black', alpha=1) + plt.show + +.. code:: ipython3 + + brain = pd.merge(df_euler, brain_all, how='inner') + +.. code:: ipython3 + + len(brain) + +.. code:: ipython3 + + brain_good = brain.query('avg_euler_centered_neg_sqrt < 10') + +.. 
code:: ipython3 + + len(brain_good) + +We lose 63 subjects because they have a large euler number. + +Step 3: Combine covariate & cortical thickness dataframes +************************************************************* + +Even though the normative modeling code needs the covariate and features +(cortical thickness) in separate text files, we first need to merge them +together to make sure that we have the same subjects in each file and +that the rows (representing subjects) align. + +.. code:: ipython3 + + # make sure to use how="inner" so that we only include subjects that have data in both the covariate and the cortical thickness files + all_data = pd.merge(brain_good, cov, how='inner') + +Step 4: Format dataframes to run normative models +**************************************************** + +.. code:: ipython3 + + from sklearn.model_selection import train_test_split + +.. code:: ipython3 + + # Remove any subjects that have NaN variables in any of the columns + all_data.dropna(subset=['lh_bankssts_thickness', + 'lh_caudalanteriorcingulate_thickness', + 'lh_caudalmiddlefrontal_thickness', 'lh_cuneus_thickness', + 'lh_entorhinal_thickness', 'lh_fusiform_thickness', + 'lh_inferiorparietal_thickness', 'lh_inferiortemporal_thickness', + 'lh_isthmuscingulate_thickness', 'lh_lateraloccipital_thickness', + 'lh_lateralorbitofrontal_thickness', 'lh_lingual_thickness', + 'lh_medialorbitofrontal_thickness', 'lh_middletemporal_thickness', + 'lh_parahippocampal_thickness', 'lh_paracentral_thickness', + 'lh_parsopercularis_thickness', 'lh_parsorbitalis_thickness', + 'lh_parstriangularis_thickness', 'lh_pericalcarine_thickness', + 'lh_postcentral_thickness', 'lh_posteriorcingulate_thickness', + 'lh_precentral_thickness', 'lh_precuneus_thickness', + 'lh_rostralanteriorcingulate_thickness', + 'lh_rostralmiddlefrontal_thickness', 'lh_superiorfrontal_thickness', + 'lh_superiorparietal_thickness', 'lh_superiortemporal_thickness', + 'lh_supramarginal_thickness', 'lh_frontalpole_thickness', + 'lh_temporalpole_thickness', 'lh_transversetemporal_thickness', + 'lh_insula_thickness', 'lh_MeanThickness_thickness', + 'rh_bankssts_thickness', 'rh_caudalanteriorcingulate_thickness', + 'rh_caudalmiddlefrontal_thickness', 'rh_cuneus_thickness', + 'rh_entorhinal_thickness', 'rh_fusiform_thickness', + 'rh_inferiorparietal_thickness', 'rh_inferiortemporal_thickness', + 'rh_isthmuscingulate_thickness', 'rh_lateraloccipital_thickness', + 'rh_lateralorbitofrontal_thickness', 'rh_lingual_thickness', + 'rh_medialorbitofrontal_thickness', 'rh_middletemporal_thickness', + 'rh_parahippocampal_thickness', 'rh_paracentral_thickness', + 'rh_parsopercularis_thickness', 'rh_parsorbitalis_thickness', + 'rh_parstriangularis_thickness', 'rh_pericalcarine_thickness', + 'rh_postcentral_thickness', 'rh_posteriorcingulate_thickness', + 'rh_precentral_thickness', 'rh_precuneus_thickness', + 'rh_rostralanteriorcingulate_thickness', + 'rh_rostralmiddlefrontal_thickness', 'rh_superiorfrontal_thickness', + 'rh_superiorparietal_thickness', 'rh_superiortemporal_thickness', + 'rh_supramarginal_thickness', 'rh_frontalpole_thickness', + 'rh_temporalpole_thickness', 'rh_transversetemporal_thickness', + 'rh_insula_thickness', 'rh_MeanThickness_thickness','age','sex'], inplace=True) + +Separate the covariate & features into their own dataframes + +.. 
code:: ipython3 + + all_data_features = all_data[['lh_bankssts_thickness', + 'lh_caudalanteriorcingulate_thickness', + 'lh_caudalmiddlefrontal_thickness', 'lh_cuneus_thickness', + 'lh_entorhinal_thickness', 'lh_fusiform_thickness', + 'lh_inferiorparietal_thickness', 'lh_inferiortemporal_thickness', + 'lh_isthmuscingulate_thickness', 'lh_lateraloccipital_thickness', + 'lh_lateralorbitofrontal_thickness', 'lh_lingual_thickness', + 'lh_medialorbitofrontal_thickness', 'lh_middletemporal_thickness', + 'lh_parahippocampal_thickness', 'lh_paracentral_thickness', + 'lh_parsopercularis_thickness', 'lh_parsorbitalis_thickness', + 'lh_parstriangularis_thickness', 'lh_pericalcarine_thickness', + 'lh_postcentral_thickness', 'lh_posteriorcingulate_thickness', + 'lh_precentral_thickness', 'lh_precuneus_thickness', + 'lh_rostralanteriorcingulate_thickness', + 'lh_rostralmiddlefrontal_thickness', 'lh_superiorfrontal_thickness', + 'lh_superiorparietal_thickness', 'lh_superiortemporal_thickness', + 'lh_supramarginal_thickness', 'lh_frontalpole_thickness', + 'lh_temporalpole_thickness', 'lh_transversetemporal_thickness', + 'lh_insula_thickness', 'lh_MeanThickness_thickness', + 'rh_bankssts_thickness', 'rh_caudalanteriorcingulate_thickness', + 'rh_caudalmiddlefrontal_thickness', 'rh_cuneus_thickness', + 'rh_entorhinal_thickness', 'rh_fusiform_thickness', + 'rh_inferiorparietal_thickness', 'rh_inferiortemporal_thickness', + 'rh_isthmuscingulate_thickness', 'rh_lateraloccipital_thickness', + 'rh_lateralorbitofrontal_thickness', 'rh_lingual_thickness', + 'rh_medialorbitofrontal_thickness', 'rh_middletemporal_thickness', + 'rh_parahippocampal_thickness', 'rh_paracentral_thickness', + 'rh_parsopercularis_thickness', 'rh_parsorbitalis_thickness', + 'rh_parstriangularis_thickness', 'rh_pericalcarine_thickness', + 'rh_postcentral_thickness', 'rh_posteriorcingulate_thickness', + 'rh_precentral_thickness', 'rh_precuneus_thickness', + 'rh_rostralanteriorcingulate_thickness', + 'rh_rostralmiddlefrontal_thickness', 'rh_superiorfrontal_thickness', + 'rh_superiorparietal_thickness', 'rh_superiortemporal_thickness', + 'rh_supramarginal_thickness', 'rh_frontalpole_thickness', + 'rh_temporalpole_thickness', 'rh_transversetemporal_thickness', + 'rh_insula_thickness', 'rh_MeanThickness_thickness']] + +.. code:: ipython3 + + all_data_covariates = all_data[['age','sex','site']] + +Right now, the sites are coded in a single column using a string. We +need to instead dummy encode the site variable so that there is a column +for each site and the columns contain binary variables (0/1). Luckily +pandas has a nice built in function, ``pd.get_dummies`` to help us +format the site column this way! + +.. code:: ipython3 + + all_data_covariates = pd.get_dummies(all_data_covariates, columns=['site']) + +.. code:: ipython3 + + all_data['Average_Thickness'] = all_data[['lh_MeanThickness_thickness','rh_MeanThickness_thickness']].mean(axis=1) + +Take a sneak peak to see if there are any super obvious site effects. If +there were, we would see a large separation in the fitted regression +line for each site. + +.. code:: ipython3 + + sns.set_theme(style="darkgrid",font_scale=1.5) + c = sns.lmplot(data=all_data, x="age", y="Average_Thickness", hue="site", height=6) + plt.ylim(1.5, 3.25) + plt.xlim(15, 95) + plt.show() + +Create train/test split +----------------------- + +We will use 80% of the data for training and 20% for testing. 
We +stratify our train/test split using the site variable to make sure that +the train/test sets both contain data from all sites. The model wouldn’t +learn the site effects if all of the data from one site was only in the +test set. + +.. code:: ipython3 + + X_train, X_test, y_train, y_test = train_test_split(all_data_covariates, all_data_features, stratify=all_data['site'], test_size=0.2, random_state=42) + +Verify that your train & test arrays are the same size + +.. code:: ipython3 + + tr_cov_size = X_train.shape + tr_resp_size = y_train.shape + te_cov_size = X_test.shape + te_resp_size = y_test.shape + print("Train covariate size is: ", tr_cov_size) + print("Test covariate size is: ", te_cov_size) + print("Train response size is: ", tr_resp_size) + print("Test response size is: ", te_resp_size) + + +Save out each ROI to its own file: + +We setup the normative model so that for each Y (brain region) we fit a +separate model. While the estimate function in the pcntoolkit can handle +having all of the Y’s in a single text file, for this tutorial we are +going to organize our Y’s so that they are each in their own text file +and directory. + +.. code:: ipython3 + + os.chdir('/Users/saigerutherford/repos/PCNToolkit-demo/') + +.. code:: ipython3 + + cd data/ + +.. code:: ipython3 + + for c in y_train.columns: + y_train[c].to_csv('resp_tr_' + c + '.txt', header=False, index=False) + +.. code:: ipython3 + + X_train.to_csv('cov_tr.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + y_train.to_csv('resp_tr.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + for c in y_test.columns: + y_test[c].to_csv('resp_te_' + c + '.txt', header=False, index=False) + +.. code:: ipython3 + + X_test.to_csv('cov_te.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + y_test.to_csv('resp_te.txt', sep = '\t', header=False, index = False) + +.. code:: ipython3 + + ! if [[ ! -e data/ROI_models/ ]]; then mkdir data/ROI_models; fi + ! if [[ ! -e data/covariate_files/ ]]; then mkdir data/covariate_files; fi + ! if [[ ! -e data/response_files/ ]]; then mkdir data/response_files; fi + +.. code:: ipython3 + + ! for i in `cat data/roi_dir_names`; do cd data/ROI_models; mkdir ${i}; cd ../../; cp resp_tr_${i}.txt data/ROI_models/${i}/resp_tr.txt; cp resp_te_${i}.txt data/ROI_models/${i}/resp_te.txt; cp cov_tr.txt data/ROI_models/${i}/cov_tr.txt; cp cov_te.txt data/ROI_models/${i}/cov_te.txt; done + +.. code:: ipython3 + + ! mv resp_*.txt data/response_files/ + +.. code:: ipython3 + + ! mv cov_t*.txt data/covariate_files/ + +Step 5: Run normative model +****************************** + +.. code:: ipython3 + + # set this path to wherever your ROI_models folder is located (where you copied all of the covariate & response text files to in Step 4) + data_dir = '/Users/saigerutherford/repos/PCNToolkit-demo/data/ROI_models/' + +.. code:: ipython3 + + # Create a list of all the ROIs you want to run a normative model for + roi_ids = ['lh_MeanThickness_thickness', + 'rh_MeanThickness_thickness', + 'lh_bankssts_thickness', + 'lh_caudalanteriorcingulate_thickness', + 'lh_superiorfrontal_thickness', + 'rh_superiorfrontal_thickness'] + +When we split the data into train and test sets, we did not reset the +index. This means that the row numbers in the train/test matrices are +still the same as before splitting the data. 
We will need to know which test set +row numbers belong to which site in order to evaluate +per-site performance metrics, so we need to reset the row numbers in the +train/test split matrices. + 
+.. code:: ipython3 + + x_col_names = ['age', 'sex', 'site_cam', 'site_hcp', 'site_ixi'] + X_train = pd.read_csv('data/covariate_files/cov_tr.txt', sep='\t', header=None, names=x_col_names) + X_test = pd.read_csv('data/covariate_files/cov_te.txt', sep='\t', header=None, names=x_col_names) + y_train = pd.read_csv('data/response_files/resp_tr.txt', sep='\t', header=None) + y_test = pd.read_csv('data/response_files/resp_te.txt', sep='\t', header=None) + 
+.. code:: ipython3 + + X_train.reset_index(drop=True, inplace=True) + X_test.reset_index(drop=True, inplace=True) + y_train.reset_index(drop=True, inplace=True) + y_test.reset_index(drop=True, inplace=True) + 
+Extract site indices: + 
+Get site ids so that we can evaluate the test metrics independently for +each site. + 
+.. code:: ipython3 + + cam_idx = X_test.index[X_test['site_cam'] == 1].to_list() + hcp_idx = X_test.index[X_test['site_hcp'] == 1].to_list() + ixi_idx = X_test.index[X_test['site_ixi'] == 1].to_list() + + # Save the site indices into a single list + sites = [cam_idx, hcp_idx, ixi_idx] + + # Create a list with site names to use in evaluating per-site metrics + site_names = ['cam', 'hcp', 'ixi'] + 
+Basis expansion: + 
+Now, we set up a B-spline basis set that allows us to perform nonlinear +regression using a linear model. This basis is deliberately chosen not +to be too flexible, so that it can only model relatively slowly +varying trends. To increase the flexibility of the model you can change +the parameterisation (e.g. by adding knot points to the B-spline basis or +increasing the order of the interpolating polynomial). + 
+Note that in the neuroimaging literature, it is more common to use a +polynomial basis expansion for this. Piecewise polynomials like +B-splines are superior because they do not introduce a global curvature. +See the references below for further information. + 
+`Primer on regression +splines `__ + 
+`Reference for why polynomials are a bad +idea `__ + 
+.. code:: ipython3 + + # Create a cubic B-spline basis (used for regression) + xmin = 10#16 # xmin & xmax are the boundaries for ages of participants in the dataset + xmax = 95#90 + B = create_bspline_basis(xmin, xmax) + + # create the basis expansion for the covariates for each of the ROIs + for roi in roi_ids: + print('Creating basis expansion for ROI:', roi) + roi_dir = os.path.join(data_dir, roi) + os.chdir(roi_dir) + + # create output dir + os.makedirs(os.path.join(roi_dir,'blr'), exist_ok=True) + + # load train & test covariate data matrices + X_tr = np.loadtxt(os.path.join(roi_dir, 'cov_tr.txt')) + X_te = np.loadtxt(os.path.join(roi_dir, 'cov_te.txt')) + + # add intercept column + X_tr = np.concatenate((X_tr, np.ones((X_tr.shape[0],1))), axis=1) + X_te = np.concatenate((X_te, np.ones((X_te.shape[0],1))), axis=1) + np.savetxt(os.path.join(roi_dir, 'cov_int_tr.txt'), X_tr) + np.savetxt(os.path.join(roi_dir, 'cov_int_te.txt'), X_te) + + # create Bspline basis set + Phi = np.array([B(i) for i in X_tr[:,0]]) + Phis = np.array([B(i) for i in X_te[:,0]]) + X_tr = np.concatenate((X_tr, Phi), axis=1) + X_te = np.concatenate((X_te, Phis), axis=1) + np.savetxt(os.path.join(roi_dir, 'cov_bspline_tr.txt'), X_tr) + np.savetxt(os.path.join(roi_dir, 'cov_bspline_te.txt'), X_te) + 
+Prepare output structures: + +.. 
code:: ipython3 + + # Create pandas dataframes with header names to save out the overall and per-site model evaluation metrics + blr_metrics = pd.DataFrame(columns = ['ROI', 'MSLL', 'EV', 'SMSE', 'RMSE', 'Rho']) + blr_site_metrics = pd.DataFrame(columns = ['ROI', 'site', 'y_mean', 'y_var', 'yhat_mean', 'yhat_var', 'MSLL', 'EV', 'SMSE', 'RMSE', 'Rho']) + +Estimate the normative models: + +In this step, we estimate the normative models one at a time. In +principle we could also do this on the whole data matrix at once +(e.g. with the response variables stored in a n_subjects x +n_brain_measures numpy array). However, doing it this way gives us some +extra flexibility in that it does not require that the subjects are +exactly the same for each of the brain measures. + +This code fragment will loop through each region of interest in the +roi_ids list (set a few code blocks above) using Bayesian linear +regression and evaluate the model on the independent test set. It will +then compute error metrics such as the explained variance, mean +standardized log loss and Pearson correlation between true and predicted +test responses separately for each scanning site. + +We supply the estimate function with a few specific arguments that are +worthy of commenting on: \* alg = ‘blr’ : specifies we should use +Bayesian linear regression \* optimizer = ‘powell’ : use Powell’s +derivative-free optimization method (faster in this case than L-BFGS) \* +savemodel = False : do not write out the final estimated model to disk +\* saveoutput = False : return the outputs directly rather than writing +them to disk \* standardize = False : Do not standardize the covariates +or response variables + +One important consideration is whether or not to standardize. Whilst +this generally only has a minor effect on the final model accuracy, it +has implications for the interpretation of models and how they are +configured. If the covariates and responses are both standardized, the +model will return standardized coefficients. If (as in this case) the +response variables are not standardized, then the scaling both +covariates and responses will be reflected in the estimated +coefficients. Also, under the linear modelling approach employed here, +if the coefficients are unstandardized and do not have a zero mean, it +is necessary to add an intercept column to the design matrix. This is +done in the code block above. + +.. code:: ipython3 + + # Loop through ROIs + for roi in roi_ids: + print('Running ROI:', roi) + roi_dir = os.path.join(data_dir, roi) + os.chdir(roi_dir) + + # configure the covariates to use. 
Change *_bspline_* to *_int_* to + cov_file_tr = os.path.join(roi_dir, 'cov_bspline_tr.txt') + cov_file_te = os.path.join(roi_dir, 'cov_bspline_te.txt') + + # load train & test response files + resp_file_tr = os.path.join(roi_dir, 'resp_tr.txt') + resp_file_te = os.path.join(roi_dir, 'resp_te.txt') + + # run a basic model + yhat_te, s2_te, nm, Z, metrics_te = estimate(cov_file_tr, + resp_file_tr, + testresp=resp_file_te, + testcov=cov_file_te, + alg = 'blr', + optimizer = 'powell', + savemodel = False, + saveoutput = False, + standardize = False) + # display and save metrics + print('EV=', metrics_te['EXPV'][0]) + print('RHO=', metrics_te['Rho'][0]) + print('MSLL=', metrics_te['MSLL'][0]) + blr_metrics.loc[len(blr_metrics)] = [roi, metrics_te['MSLL'][0], metrics_te['EXPV'][0], metrics_te['SMSE'][0], + metrics_te['RMSE'][0], metrics_te['Rho'][0]] + + # Compute metrics per site in test set, save to pandas df + # load true test data + X_te = np.loadtxt(cov_file_te) + y_te = np.loadtxt(resp_file_te) + y_te = y_te[:, np.newaxis] # make sure it is a 2-d array + + # load training data (required to compute the MSLL) + y_tr = np.loadtxt(resp_file_tr) + y_tr = y_tr[:, np.newaxis] + + for num, site in enumerate(sites): + y_mean_te_site = np.array([[np.mean(y_te[site])]]) + y_var_te_site = np.array([[np.var(y_te[site])]]) + yhat_mean_te_site = np.array([[np.mean(yhat_te[site])]]) + yhat_var_te_site = np.array([[np.var(yhat_te[site])]]) + + metrics_te_site = evaluate(y_te[site], yhat_te[site], s2_te[site], y_mean_te_site, y_var_te_site) + + site_name = site_names[num] + blr_site_metrics.loc[len(blr_site_metrics)] = [roi, site_names[num], + y_mean_te_site[0], + y_var_te_site[0], + yhat_mean_te_site[0], + yhat_var_te_site[0], + metrics_te_site['MSLL'][0], + metrics_te_site['EXPV'][0], + metrics_te_site['SMSE'][0], + metrics_te_site['RMSE'][0], + metrics_te_site['Rho'][0]] + +.. code:: ipython3 + + os.chdir(data_dir) + +.. code:: ipython3 + + # Save per site test set metrics variable to CSV file + blr_site_metrics.to_csv('blr_site_metrics.csv', index=False, index_label=None) + +.. code:: ipython3 + + # Save overall test set metrics to CSV file + blr_metrics.to_csv('blr_metrics.csv', index=False, index_label=None) + +Step 6: Interpreting model performance +***************************************** + +Output evaluation metrics definitions + +================= ====================================================================================================== +**key value** **Description** +----------------- ------------------------------------------------------------------------------------------------------ +yhat predictive mean +ys2 predictive variance +nm normative model +Z deviance scores +Rho Pearson correlation between true and predicted responses +pRho parametric p-value for this correlation +RMSE root mean squared error between true/predicted responses +SMSE standardised mean squared error +EV explained variance +MSLL mean standardized log loss `See page 23 `_ +================= ====================================================================================================== + + diff --git a/doc/source/pages/updates.rst b/doc/source/pages/updates.rst new file mode 100644 index 00000000..38b10ae3 --- /dev/null +++ b/doc/source/pages/updates.rst @@ -0,0 +1,7 @@ +.. _updates: + +.. 
title:: List of updates + +Updates +================== + diff --git a/doc/source/pcn-logo.png b/doc/source/pcn-logo.png new file mode 100644 index 00000000..f08cca9b Binary files /dev/null and b/doc/source/pcn-logo.png differ diff --git a/pcntoolkit/__init__.py b/pcntoolkit/__init__.py index 339bebca..087fe624 100644 --- a/pcntoolkit/__init__.py +++ b/pcntoolkit/__init__.py @@ -1,12 +1,4 @@ -from . import bayesreg from . import trendsurf -from . import gp from . import normative -from . import utils -from . import fileio from . import normative_parallel -from . import rfa -from . import architecture -from . import NP from . import normative_NP -from . import hbr diff --git a/pcntoolkit/dataio/__init__.py b/pcntoolkit/dataio/__init__.py new file mode 100644 index 00000000..1208872a --- /dev/null +++ b/pcntoolkit/dataio/__init__.py @@ -0,0 +1 @@ +from . import fileio diff --git a/pcntoolkit/fileio.py b/pcntoolkit/dataio/fileio.py similarity index 99% rename from pcntoolkit/fileio.py rename to pcntoolkit/dataio/fileio.py index 4e63f4c4..f4b85aff 100644 --- a/pcntoolkit/fileio.py +++ b/pcntoolkit/dataio/fileio.py @@ -14,6 +14,7 @@ pass path = os.path.abspath(os.path.dirname(__file__)) + path = os.path.dirname(path) # parent directory if path not in sys.path: sys.path.append(path) del path diff --git a/pcntoolkit/NP.py b/pcntoolkit/model/NP.py similarity index 100% rename from pcntoolkit/NP.py rename to pcntoolkit/model/NP.py diff --git a/pcntoolkit/NPR.py b/pcntoolkit/model/NPR.py similarity index 100% rename from pcntoolkit/NPR.py rename to pcntoolkit/model/NPR.py diff --git a/pcntoolkit/model/__init__.py b/pcntoolkit/model/__init__.py new file mode 100644 index 00000000..fe59b2d4 --- /dev/null +++ b/pcntoolkit/model/__init__.py @@ -0,0 +1,6 @@ +from . import bayesreg +from . import gp +from . import rfa +from . import architecture +from . import NP +from . 
import hbr diff --git a/pcntoolkit/architecture.py b/pcntoolkit/model/architecture.py similarity index 100% rename from pcntoolkit/architecture.py rename to pcntoolkit/model/architecture.py diff --git a/pcntoolkit/bayesreg.py b/pcntoolkit/model/bayesreg.py old mode 100644 new mode 100755 similarity index 55% rename from pcntoolkit/bayesreg.py rename to pcntoolkit/model/bayesreg.py index 8045e37c..634c03d3 --- a/pcntoolkit/bayesreg.py +++ b/pcntoolkit/model/bayesreg.py @@ -52,16 +52,24 @@ def __init__(self, **kwargs): tol = kwargs.get('tol', 1e-3) verbose = kwargs.get('verbose', False) var_groups = kwargs.get('var_groups', None) + var_covariates = kwargs.get('var_covariates', None) warp = kwargs.get('warp', None) warp_reparam = kwargs.get('warp_reparam', False) - + + if var_groups is not None and var_covariates is not None: + raise ValueError("var_covariates and var_groups cannot both be used") + # basic parameters self.hyp = np.nan self.nlZ = np.nan self.tol = tol # not used at present self.n_iter = n_iter self.verbose = verbose - self.var_groups = var_groups + self.var_groups = var_groups + if var_covariates is not None: + self.hetero_var = True + else: + self.hetero_var = False if self.var_groups is not None: self.var_ids = set(self.var_groups) self.var_ids = sorted(list(self.var_ids)) @@ -79,18 +87,28 @@ def __init__(self, **kwargs): self.gamma = None - def _parse_hyps(self, hyp, X): + def _parse_hyps(self, hyp, X, Xv=None): N = X.shape[0] # noise precision - if self.var_groups is None: - beta = np.asarray([np.exp(hyp[0])]) - else: + if Xv is not None: + if len(Xv.shape) == 1: + Dv = 1 + Xv = Xv[:, np.newaxis] + else: + Dv = Xv.shape[1] + w_d = np.asarray(hyp[0:Dv]) + beta = np.exp(Xv.dot(w_d)) + n_lik_param = len(w_d) + elif self.var_groups is not None: beta = np.exp(hyp[0:len(self.var_ids)]) + n_lik_param = len(beta) + else: + beta = np.asarray([np.exp(hyp[0])]) + n_lik_param = len(beta) # parameters for warping the likelhood function - n_lik_param = len(beta) if self.warp is not None: gamma = hyp[n_lik_param:(n_lik_param + self.n_warp_param)] n_lik_param += self.n_warp_param @@ -109,24 +127,29 @@ def _parse_hyps(self, hyp, X): beta = beta/(delta**2) # Create precision matrix from noise precision - if self.var_groups is None: - self.Lambda_n = np.diag(np.ones(N)*beta) - self.Sigma_n = np.diag(np.ones(N)/beta) - else: + if Xv is not None: + self.lambda_n_vec = beta + elif self.var_groups is not None: beta_all = np.ones(N) for v in range(len(self.var_ids)): beta_all[self.var_groups == self.var_ids[v]] = beta[v] - self.Lambda_n = np.diag(beta_all) - self.Sigma_n = np.diag(1/beta_all) + self.lambda_n_vec = beta_all + else: + self.lambda_n_vec = np.ones(N)*beta return beta, alpha, gamma - def post(self, hyp, X, y): + def post(self, hyp, X, y, Xv=None): """ Generic function to compute posterior distribution. This function will save the posterior mean and precision matrix as self.m and self.A and will also update internal parameters (e.g. N, D and the prior covariance (Sigma_a) and precision (Lambda_a). + + :param hyp: hyperparameter vector + :param X: covariates + :param y: responses + :param Xv: covariates for heteroskedastic noise """ N = X.shape[0] @@ -139,7 +162,7 @@ def post(self, hyp, X, y): print("hyperparameters have not changed, exiting") return - beta, alpha, gamma = self._parse_hyps(hyp, X) + beta, alpha, gamma = self._parse_hyps(hyp, X, Xv) if self.verbose: print("estimating posterior ... 
| hyp=", hyp) @@ -152,22 +175,28 @@ def post(self, hyp, X, y): raise ValueError("hyperparameter vector has invalid length") # compute posterior precision and mean - self.A = X.T.dot(self.Lambda_n).dot(X) + self.Lambda_a - self.m = linalg.solve(self.A, X.T, - check_finite=False).dot(self.Lambda_n).dot(y) - #self.m = linalg.lstsq(self.A, X.T, - # check_finite=False)[0].dot(self.Lambda_n).dot(y) + # this is equivalent to the following operation but makes much more + # efficient use of memory by avoiding the need to store Lambda_n + # + # self.A = X.T.dot(self.Lambda_n).dot(X) + self.Lambda_a + # self.m = linalg.solve(self.A, X.T, + # check_finite=False).dot(self.Lambda_n).dot(y) + + XtLambda_n = X.T*self.lambda_n_vec + self.A = XtLambda_n.dot(X) + self.Lambda_a + invAXt = linalg.solve(self.A, X.T, check_finite=False) + self.m = (invAXt*self.lambda_n_vec).dot(y) # save stuff self.N = N self.D = D self.hyp = hyp - def loglik(self, hyp, X, y): + def loglik(self, hyp, X, y, Xv=None): """ Function to compute compute log (marginal) likelihood """ # hyperparameters (alpha not needed) - beta, alpha, gamma = self._parse_hyps(hyp, X) + beta, alpha, gamma = self._parse_hyps(hyp, X, Xv) # warp the likelihood? if self.warp is not None: @@ -179,7 +208,7 @@ def loglik(self, hyp, X, y): # load posterior and prior covariance if (hyp != self.hyp).any() or not(hasattr(self, 'A')): try: - self.post(hyp, X, y) + self.post(hyp, X, y, Xv) except ValueError: print("Warning: Estimation of posterior distribution failed") nlZ = 1/np.finfo(float).eps @@ -194,17 +223,19 @@ def loglik(self, hyp, X, y): return nlZ logdetSigma_a = sum(np.log(np.diag(self.Sigma_a))) # diagonal - logdetSigma_n = sum(np.log(np.diag(self.Sigma_n))) - + logdetSigma_n = sum(np.log(1/self.lambda_n_vec)) + # compute negative marginal log likelihood + X_y_t_sLambda_n = (y-X.dot(self.m))*np.sqrt(self.lambda_n_vec) nlZ = -0.5 * (-self.N*np.log(2*np.pi) - logdetSigma_n - logdetSigma_a - - (y-X.dot(self.m)).T.dot(self.Lambda_n).dot(y-X.dot(self.m)) - + X_y_t_sLambda_n.T.dot(X_y_t_sLambda_n) - self.m.T.dot(self.Lambda_a).dot(self.m) - logdetA ) + if self.warp is not None: # add in the Jacobian nlZ = nlZ - sum(np.log(self.warp.df(y_unwarped, gamma))) @@ -218,12 +249,31 @@ def loglik(self, hyp, X, y): self.nlZ = nlZ return nlZ + + def penalized_loglik(self, hyp, X, y, Xv=None, l=0.1, norm='L1'): + """ Function to compute the penalized log (marginal) likelihood + + :param hyp: hyperparameter vector + :param X: covariates + :param y: responses + :param Xv: covariates for heteroskedastic noise + :param l: regularisation penalty + :param norm: type of regulariser (L1 or L2) + """ + + if norm.lower() == 'l1': + L = self.loglik(hyp, X, y, Xv) + l * sum(abs(hyp)) + elif norm.lower() == 'l2': + L = self.loglik(hyp, X, y, Xv) + l * sum(np.sqrt(hyp**2)) + else: + print("Requested penalty not recognized, choose between 'L1' or 'L2'.") + return L - def dloglik(self, hyp, X, y): + def dloglik(self, hyp, X, y, Xv=None): """ Function to compute derivatives """ # hyperparameters - beta, alpha, gamma = self._parse_hyps(hyp, X) + beta, alpha, gamma = self._parse_hyps(hyp, X, Xv) if self.warp is not None: raise ValueError('optimization with derivatives is not yet ' + \ @@ -232,7 +282,7 @@ def dloglik(self, hyp, X, y): # load posterior and prior covariance if (hyp != self.hyp).any() or not(hasattr(self, 'A')): try: - self.post(hyp, X, y) + self.post(hyp, X, y, Xv) except ValueError: print("Warning: Estimation of posterior distribution failed") dnlZ = np.sign(self.dnlZ) / 
np.finfo(float).eps @@ -243,14 +293,15 @@ def dloglik(self, hyp, X, y): # that would remove the need to explicitly compute the inverse S = np.linalg.inv(self.A) # posterior covariance SX = S.dot(X.T) - XLn = X.T.dot(self.Lambda_n) + XLn = X.T*self.lambda_n_vec # = X.T.dot(self.Lambda_n) XLny = XLn.dot(y) SXLny = S.dot(XLny) XLnXm = XLn.dot(X).dot(self.m) # initialise derivatives dnlZ = np.zeros(hyp.shape) - + dnl2 = np.zeros(hyp.shape) + # noise precision parameter(s) for i in range(0, len(beta)): # first compute derivative of Lambda_n with respect to beta @@ -269,12 +320,16 @@ def dloglik(self, hyp, X, y): b = -S.dot(dA).dot(SXLny) + SX.dot(dLambda_n).dot(y) # compute np.trace(self.Sigma_n.dot(dLambda_n)) efficiently - trSigma_ndLambda_n = sum(np.diag(self.Sigma_n)*np.diag(dLambda_n)) + trSigma_ndLambda_n = sum((1/self.lambda_n_vec)*np.diag(dLambda_n)) + + # compute y.T.dot(Lambda_n) efficiently + ytLn = (y*self.lambda_n_vec).T + # compute derivatives dnlZ[i] = - (0.5 * trSigma_ndLambda_n - 0.5 * y.dot(dLambda_n).dot(y) + y.dot(dLambda_n).dot(X).dot(self.m) + - y.T.dot(self.Lambda_n).dot(X).dot(b) - + ytLn.dot(X).dot(b) - 0.5 * self.m.T.dot(XdLnX).dot(self.m) - b.T.dot(XLnXm) - b.T.dot(self.Lambda_a).dot(self.m) - @@ -318,20 +373,38 @@ def dloglik(self, hyp, X, y): # model estimation (optimization) def estimate(self, hyp0, X, y, **kwargs): - """ Function to estimate the model """ + """ Function to estimate the model + + :param hyp: hyperparameter vector + :param X: covariates + :param y: responses + :param optimizer: optimisation algorithm ('cg','powell','nelder-mead','l0bfgs-b') + """ + optimizer = kwargs.get('optimizer','cg') + + # covariates for heteroskedastic noise + Xv = kwargs.get('var_covariates', None) + + # options for l-bfgs-b + l = kwargs.get('l', 0.1) + epsilon = kwargs.get('epsilon', 0.1) + norm = kwargs.get('norm', 'l2') if optimizer.lower() == 'cg': # conjugate gradients - out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y), + out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik, (X, y, Xv), disp=True, gtol=self.tol, maxiter=self.n_iter, full_output=1) - elif optimizer.lower() == 'powell': # Powell's method - out = optimize.fmin_powell(self.loglik, hyp0, (X, y), + out = optimize.fmin_powell(self.loglik, hyp0, (X, y, Xv), full_output=1) elif optimizer.lower() == 'nelder-mead': - out = optimize.fmin(self.loglik, hyp0, (X, y), + out = optimize.fmin(self.loglik, hyp0, (X, y, Xv), full_output=1) + elif optimizer.lower() == 'l-bfgs-b': + out = optimize.fmin_l_bfgs_b(self.penalized_loglik, x0=hyp0, + args=(X, y, Xv, l, norm), approx_grad=True, + epsilon=epsilon) else: raise ValueError("unknown optimizer") @@ -341,41 +414,144 @@ def estimate(self, hyp0, X, y, **kwargs): return self.hyp - def predict(self, hyp, X, y, Xs, var_groups_test=None): - """ Function to make predictions from the model """ - + def predict(self, hyp, X, y, Xs, + var_groups_test=None, + var_covariates_test=None, **kwargs): + """ Function to make predictions from the model + + :param hyp: hyperparameter vector + :param X: covariates for training data + :param y: responses for training data + :param Xs: covariates for test data + :param var_covariates_test: test covariates for heteroskedastic noise + + This always returns Gaussian predictions, i.e. 
+ + :returns: * ys - predictive mean + * s2 - predictive variance + """ + + Xvs = var_covariates_test + if Xvs is not None and len(Xvs.shape) == 1: + Xvs = Xvs[:, np.newaxis] + if X is None or y is None: # set dummy hyperparameters - beta, alpha, gamma = self._parse_hyps(hyp, np.zeros((self.N, 1))) + beta, alpha, gamma = self._parse_hyps(hyp, np.zeros((self.N, self.D)), Xvs) else: # set hyperparameters - beta, alpha, gamma = self._parse_hyps(hyp, X) + beta, alpha, gamma = self._parse_hyps(hyp, X, Xvs) # do we need to re-estimate the posterior? if (hyp != self.hyp).any() or not(hasattr(self, 'A')): - # warp the likelihood? - if self.warp is not None: - if self.verbose: - print('warping input...') - y = self.warp.f(y, gamma) - self.post(hyp, X, y) + raise(ValueError, 'posterior not properly estimated') N_test = Xs.shape[0] ys = Xs.dot(self.m) - if self.var_groups is None: - s2n = 1/beta - else: + if self.var_groups is not None: if len(var_groups_test) != N_test: raise(ValueError, 'Invalid variance groups for test') # separate variance groups s2n = np.ones(N_test) for v in range(len(self.var_ids)): s2n[var_groups_test == self.var_ids[v]] = 1/beta[v] - + else: + s2n = 1/beta + # compute xs.dot(S).dot(xs.T) avoiding computing off-diagonal entries s2 = s2n + np.sum(Xs*linalg.solve(self.A, Xs.T).T, axis=1) - return ys, s2 + return ys, s2 + + def predict_and_adjust(self, hyp, X, y, Xs=None, + ys=None, + var_groups_test=None, + var_groups_adapt=None, **kwargs): + """ Function to transfer the model to a new site. This is done by + first making predictions on the adaptation data given by X, + adjusting by the residuals with respect to y. + + :param hyp: hyperparameter vector + :param X: covariates for adaptation (i.e. calibration) data + :param y: responses for adaptation data + :param Xs: covariate data (for which predictions should be adjusted) + :param ys: true response variables (to be adjusted) + :param var_groups_test: variance groups (e.g. sites) for test data + :param var_groups_adapt: variance groups for adaptation data + + There are two possible ways of using this function, depending on + whether ys or Xs is specified + + If ys is specified, this is applied directly to the data, which is + assumed to be in the input space (i.e. not warped). In this case + the adjusted true data points are returned in the same space + + Alternatively, Xs is specified, then the predictions are made and + adjusted. In this case the predictive variance are returned in the + warped (i.e. Gaussian) space. + + This function needs to know which sites are associated with which + data points, which provided by var_groups_xxx, which is a list or + array of scalar ids . 
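A minimal, self-contained sketch of how the new predict_and_adjust method might be called (illustrative only, not taken from the patch): the data are synthetic, the reference model is a plain linear BLR, and the adaptation data simulate a new site with a constant offset::

    import numpy as np
    from pcntoolkit.model.bayesreg import BLR

    np.random.seed(0)
    N_tr, N_ad, N_te = 200, 40, 40

    # reference (training) data and a simple linear design matrix
    X_tr = np.random.uniform(-2, 2, N_tr)
    y_tr = 0.5 * X_tr + 0.1 * np.random.randn(N_tr)
    Phi_tr = np.c_[np.ones(N_tr), X_tr]              # intercept + slope

    B = BLR()
    hyp = B.estimate(np.zeros(2), Phi_tr, y_tr, optimizer='powell')

    # adaptation (calibration) data from a new site with a constant offset
    X_ad = np.random.uniform(-2, 2, N_ad)
    y_ad = 0.5 * X_ad + 0.3 + 0.1 * np.random.randn(N_ad)
    Phi_ad = np.c_[np.ones(N_ad), X_ad]

    # test data from the same new site
    X_te = np.linspace(-2, 2, N_te)
    Phi_te = np.c_[np.ones(N_te), X_te]

    # predictions shifted by the mean residual on the adaptation data;
    # with a single new site the var_groups_* arguments can be omitted
    ys_adj, s2_adj = B.predict_and_adjust(hyp, Phi_ad, y_ad, Xs=Phi_te)

Note that the adjusted predictive variance is taken from the spread of the residuals on the adaptation data, so at least two adaptation points are needed per site.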
+ """ + + if ys is None: + if Xs is None: + raise ValueError('Either ys or Xs must be specified') + else: + N = Xs.shape[0] + else: + if len(ys.shape) < 1: + raise ValueError('ys is specified but has insufficent length') + N = ys.shape[0] + + if var_groups_test is None: + var_groups_test = np.ones(N) + var_groups_adapt = np.ones(X.shape[0]) + + ys_out = np.zeros(N) + s2_out = np.zeros(N) + for g in np.unique(var_groups_test): + idx_s = var_groups_test == g + idx_a = var_groups_adapt == g + + if sum(idx_a) < 2: + raise ValueError('Insufficient adaptation data to estimate variance') + + # Get predictions from old model on new data X + ys_ref, s2_ref = self.predict(hyp, None, None, X[idx_a,:]) + + # Subtract the predictions from true data to get the residuals + if self.warp is None: + residuals = ys_ref-y[idx_a] + else: + # Calculate the residuals in warped space + y_ref_ws = self.warp.f(y[idx_a], hyp[1:self.warp.get_n_params()+1]) + residuals = ys_ref - y_ref_ws + + residuals_mu = np.mean(residuals) + residuals_sd = np.std(residuals) + + # Adjust the mean with the mean of the residuals + if ys is None: + # make and adjust predictions + ys_out[idx_s], s2_out[idx_s] = self.predict(hyp, None, None, Xs[idx_s,:]) + ys_out[idx_s] = ys_out[idx_s] - residuals_mu + + # Set the deviation to the devations of the residuals + s2_out[idx_s] = np.ones(len(s2_out[idx_s]))*residuals_sd**2 + else: + # adjust the data + if self.warp is not None: + y_ws = self.warp.f(ys[idx_s], hyp[1:self.warp.get_n_params()+1]) + ys_out[idx_s] = y_ws + residuals_mu + ys_out[idx_s] = self.warp.invf(ys_out[idx_s], hyp[1:self.warp.get_n_params()+1]) + else: + ys = ys - residuals_mu + s2_out = None + + return ys_out, s2_out + diff --git a/pcntoolkit/gp.py b/pcntoolkit/model/gp.py similarity index 99% rename from pcntoolkit/gp.py rename to pcntoolkit/model/gp.py index e8b8db72..e7af393b 100644 --- a/pcntoolkit/gp.py +++ b/pcntoolkit/model/gp.py @@ -17,11 +17,12 @@ pass path = os.path.abspath(os.path.dirname(__file__)) + path = os.path.dirname(path) # parent directory if path not in sys.path: sys.path.append(path) del path - from utils import squared_dist + from util.utils import squared_dist # -------------------- # Covariance functions diff --git a/pcntoolkit/hbr.py b/pcntoolkit/model/hbr.py similarity index 100% rename from pcntoolkit/hbr.py rename to pcntoolkit/model/hbr.py diff --git a/pcntoolkit/rfa.py b/pcntoolkit/model/rfa.py similarity index 100% rename from pcntoolkit/rfa.py rename to pcntoolkit/model/rfa.py diff --git a/pcntoolkit/normative.py b/pcntoolkit/normative.py index 10332361..11e69f3c 100755 --- a/pcntoolkit/normative.py +++ b/pcntoolkit/normative.py @@ -23,11 +23,11 @@ from sklearn.model_selection import KFold try: # run as a package if installed - from pcntoolkit import fileio from pcntoolkit import configs + from pcntoolkit.dataio import fileio from pcntoolkit.normative_model.norm_utils import norm_init - from pcntoolkit.utils import compute_pearsonr, CustomCV, explained_var - from pcntoolkit.utils import compute_MSLL, scaler + from pcntoolkit.util.utils import compute_pearsonr, CustomCV, explained_var + from pcntoolkit.util.utils import compute_MSLL, scaler except ImportError: pass @@ -36,11 +36,12 @@ sys.path.append(path) #sys.path.append(os.path.join(path,'normative_model')) del path - - import fileio + import configs - from utils import compute_pearsonr, CustomCV, explained_var, compute_MSLL - from utils import scaler + from dataio import fileio + + from util.utils import compute_pearsonr, CustomCV, 
explained_var, compute_MSLL + from util.utils import scaler from normative_model.norm_utils import norm_init PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL @@ -81,7 +82,7 @@ def get_args(*args): parser.add_argument("-a", help="algorithm", dest="alg", default="gpr") parser.add_argument("-x", help="algorithm specific config options", dest="configparam", default=None) - #parser.add_argument('-s', action='store_false', + # parser.add_argument('-s', action='store_false', # help="Flag to skip standardization.", dest="standardize") parser.add_argument("keyword_args", nargs=argparse.REMAINDER) @@ -130,7 +131,7 @@ def get_args(*args): args.configparam, kw_args -def evaluate(Y, Yhat, S2=None, mY=None, sY=None, +def evaluate(Y, Yhat, S2=None, mY=None, sY=None, nlZ=None, nm=None, Xz_tr=None, alg=None, metrics = ['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL']): ''' Compute error metrics This function will compute error metrics based on a set of predictions Yhat @@ -201,6 +202,16 @@ def evaluate(Y, Yhat, S2=None, mY=None, sY=None, mY.reshape(-1,1).T, (sY**2).reshape(-1,1).T) results['MSLL'] = MSLL + + if 'NLL' in metrics: + results['NLL'] = nlZ + + if 'BIC' in metrics: + if hasattr(getattr(nm, alg), 'hyp'): + n = Xz_tr.shape[0] + k = len(getattr(nm, alg).hyp) + BIC = k * np.log(n) + 2 * nlZ + results['BIC'] = BIC return results @@ -234,8 +245,12 @@ def save_results(respfile, Yhat, S2, maskvol, Z=None, outputsuffix=None, if results is not None: for metric in list(results.keys()): - fileio.save(results[metric], os.path.join(save_path, metric + ext), + if (metric == 'NLL' or metric == 'BIC') and file_ext == '.nii.gz': + fileio.save(results[metric], os.path.join(save_path, metric + str(outputsuffix) + '.pkl'), example=exfile, mask=maskvol) + else: + fileio.save(results[metric], os.path.join(save_path, metric + ext), + example=exfile, mask=maskvol) def estimate(covfile, respfile, **kwargs): """ Estimate a normative model @@ -299,7 +314,10 @@ def estimate(covfile, respfile, **kwargs): testcov = kwargs.pop('testcov', None) testresp = kwargs.pop('testresp',None) alg = kwargs.pop('alg','gpr') - outputsuffix = kwargs.pop('outputsuffix','_estimate') + outputsuffix = kwargs.pop('outputsuffix','estimate') + outputsuffix = "_" + outputsuffix.replace("_", "") # Making sure there is only one + # '_' is in the outputsuffix to + # avoid file name parsing problem. 
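The suffix normalisation above means that the old-style '_estimate' and the bare 'estimate' are both accepted and map to the same single-underscore suffix, for example::

    for s in ('estimate', '_estimate', '_my_suffix'):
        print('_' + s.replace('_', ''))    # -> _estimate, _estimate, _mysuffix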
inscaler = kwargs.pop('inscaler','None') outscaler = kwargs.pop('outscaler','None') warp = kwargs.get('warp', None) @@ -418,7 +436,7 @@ def estimate(covfile, respfile, **kwargs): fileio.save(be[ts,:], 'be_kfold_ts_tempfile.pkl') kwargs['trbefile'] = 'be_kfold_tr_tempfile.pkl' kwargs['tsbefile'] = 'be_kfold_ts_tempfile.pkl' - + # estimate the models for all subjects for i in range(0, len(nz)): print("Estimating model ", i+1, "of", len(nz)) @@ -496,12 +514,17 @@ def estimate(covfile, respfile, **kwargs): if warp is None: results = evaluate(Y[testids, :], Yhat[testids, :], S2=S2[testids, :], mY=mean_resp[0], - sY=std_resp[0]) + sY=std_resp[0], nlZ=nlZ, nm=nm, Xz_tr=Xz_tr, alg=alg, + metrics = ['Rho', 'RMSE', 'SMSE', 'EXPV', + 'MSLL', 'NLL', 'BIC']) else: results = evaluate(Ywarp[testids, :], Yhat[testids, :], S2=S2[testids, :], mY=mean_resp_warp[0], - sY=std_resp_warp[0]) - + sY=std_resp_warp[0], nlZ=nlZ, nm=nm, Xz_tr=Xz_tr, + alg=alg, metrics = ['Rho', 'RMSE', 'SMSE', + 'EXPV', 'MSLL', + 'NLL', 'BIC']) + # Set writing options if saveoutput: @@ -530,7 +553,8 @@ def fit(covfile, respfile, **kwargs): maskfile = kwargs.pop('maskfile',None) alg = kwargs.pop('alg','gpr') savemodel = kwargs.pop('savemodel','True')=='True' - outputsuffix = kwargs.pop('outputsuffix','_fit') + outputsuffix = kwargs.pop('outputsuffix','fit') + outputsuffix = "_" + outputsuffix.replace("_", "") inscaler = kwargs.pop('inscaler','None') outscaler = kwargs.pop('outscaler','None') @@ -633,8 +657,10 @@ def predict(covfile, respfile, maskfile=None, **kwargs): model_path = kwargs.pop('model_path', 'Models') job_id = kwargs.pop('job_id', None) batch_size = kwargs.pop('batch_size', None) - outputsuffix = kwargs.pop('outputsuffix', '_predict') - inputsuffix = kwargs.pop('inputsuffix', '_estimate') + outputsuffix = kwargs.pop('outputsuffix', 'predict') + outputsuffix = "_" + outputsuffix.replace("_", "") + inputsuffix = kwargs.pop('inputsuffix', 'estimate') + inputsuffix = "_" + inputsuffix.replace("_", "") alg = kwargs.pop('alg') if respfile is not None and not os.path.exists(respfile): @@ -693,7 +719,7 @@ def predict(covfile, respfile, maskfile=None, **kwargs): if (alg!='hbr' or nm.configs['transferred']==False): yhat, s2 = nm.predict(Xz, **kwargs) else: - tsbefile = kwargs.pop('tsbefile') + tsbefile = kwargs.get('tsbefile') batch_effects_test = fileio.load(tsbefile) yhat, s2 = nm.predict_on_new_sites(Xz, batch_effects_test) @@ -784,8 +810,10 @@ def transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None, trbefile = kwargs.pop('trbefile') batch_effects_train = fileio.load(trbefile) - outputsuffix = kwargs.pop('outputsuffix', '_transfer') - inputsuffix = kwargs.pop('inputsuffix', '_estimate') + outputsuffix = kwargs.pop('outputsuffix', 'transfer') + outputsuffix = "_" + outputsuffix.replace("_", "") + inputsuffix = kwargs.pop('inputsuffix', 'estimate') + inputsuffix = "_" + inputsuffix.replace("_", "") tsbefile = kwargs.pop('tsbefile', None) job_id = kwargs.pop('job_id', None) @@ -930,8 +958,10 @@ def extend(covfile, respfile, maskfile=None, **kwargs): dummycovfile = kwargs.pop('dummycovfile') dummybefile = kwargs.pop('dummybefile') - outputsuffix = kwargs.pop('outputsuffix', '_extend') - inputsuffix = kwargs.pop('inputsuffix', '_estimate') + outputsuffix = kwargs.pop('outputsuffix', 'extend') + outputsuffix = "_" + outputsuffix.replace("_", "") + inputsuffix = kwargs.pop('inputsuffix', 'estimate') + inputsuffix = "_" + inputsuffix.replace("_", "") informative_prior = kwargs.pop('informative_prior', 'False') == 
'True' generation_factor = int(kwargs.pop('generation_factor', '10')) job_id = kwargs.pop('job_id', None) diff --git a/pcntoolkit/normative_NP.py b/pcntoolkit/normative_NP.py index 45462f78..3694e146 100644 --- a/pcntoolkit/normative_NP.py +++ b/pcntoolkit/normative_NP.py @@ -27,13 +27,13 @@ from torch import optim import numpy as np import pickle -from NP import NP, apply_dropout_test, np_loss +from pcntoolkit.model.NP import NP, apply_dropout_test, np_loss from sklearn.preprocessing import MinMaxScaler, StandardScaler from sklearn.linear_model import LinearRegression, MultiTaskLasso -from architecture import Encoder, Decoder -from pcntoolkit.utils import compute_pearsonr, explained_var, compute_MSLL -from pcntoolkit.utils import extreme_value_prob, extreme_value_prob_fit, ravel_2D, unravel_2D -from pcntoolkit import fileio +from pcntoolkit.model.architecture import Encoder, Decoder +from pcntoolkit.util.utils import compute_pearsonr, explained_var, compute_MSLL +from pcntoolkit.util.utils import extreme_value_prob, extreme_value_prob_fit, ravel_2D, unravel_2D +from pcntoolkit.dataio import fileio import os try: # run as a package if installed diff --git a/pcntoolkit/normative_model/norm_blr.py b/pcntoolkit/normative_model/norm_blr.py index 7b95f320..7205efd5 100644 --- a/pcntoolkit/normative_model/norm_blr.py +++ b/pcntoolkit/normative_model/norm_blr.py @@ -8,9 +8,11 @@ from ast import literal_eval try: # run as a package if installed - from pcntoolkit.bayesreg import BLR - from pcntoolkit.normative_model.normbase import NormBase - from pcntoolkit.utils import create_poly_basis + from pcntoolkit.model.bayesreg import BLR + from pcntoolkit.normative_model.norm_base import NormBase + from pcntoolkit.dataio import fileio + from pcntoolkit.util.utils import create_poly_basis, WarpBoxCox, \ + WarpAffine, WarpCompose, WarpSinArcsinh except ImportError: pass @@ -19,9 +21,10 @@ sys.path.append(path) del path - from bayesreg import BLR + from model.bayesreg import BLR from norm_base import NormBase - from utils import create_poly_basis, WarpBoxCox, \ + from dataio import fileio + from util.utils import create_poly_basis, WarpBoxCox, \ WarpAffine, WarpCompose, WarpSinArcsinh class NormBLR(NormBase): @@ -60,9 +63,20 @@ def __init__(self, **kwargs): if type(model_order) is not int: model_order = int(model_order) - # configure variance groups (e.g. site specific variance) - if 'var_groups' in kwargs: - var_groups_file = kwargs.pop('var_groups') + # configure heteroskedastic noise + if 'varcovfile' in kwargs: + var_cov_file = kwargs.get('varcovfile') + if var_cov_file.endswith('.pkl'): + self.var_covariates = pd.read_pickle(var_cov_file) + else: + self.var_covariates = np.loadtxt(var_cov_file) + if len(self.var_covariates.shape) == 1: + self.var_covariates = self.var_covariates[:, np.newaxis] + n_beta = self.var_covariates.shape[1] + self.var_groups = None + elif 'vargroupfile' in kwargs: + # configure variance groups (e.g. site specific variance) + var_groups_file = kwargs.pop('vargroupfile') if var_groups_file.endswith('.pkl'): self.var_groups = pd.read_pickle(var_groups_file) else: @@ -72,6 +86,7 @@ def __init__(self, **kwargs): n_beta = len(var_ids) else: self.var_groups = None + self.var_covariates = None n_beta = 1 # are we using ARD? 
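The two new keywords handled above are mutually exclusive: varcovfile models the noise precision as a function of one or more covariates (heteroskedastic noise), whereas vargroupfile estimates a separate noise level per group (e.g. per site). A minimal sketch of how this might be driven from normative.estimate, assuming the usual keyword forwarding down to NormBLR; the file names and the age covariate below are placeholders::

    import numpy as np
    from pcntoolkit.normative import estimate

    # stand-in noise covariate (e.g. age), one row per training subject
    age = np.random.uniform(18, 80, 500)
    np.savetxt('var_covariates.txt', age[:, np.newaxis])

    estimate('cov_train.txt', 'resp_train.txt', alg='blr', optimizer='powell',
             varcovfile='var_covariates.txt')   # heteroskedastic noise
             # ...or, instead, one noise level per site:
             # vargroupfile='site_ids.txt'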
@@ -111,8 +126,8 @@ def __init__(self, **kwargs): # initialise the BLR object if the required parameters are present if (theta is not None) and (y is not None): - self.Phi = create_poly_basis(X, self._model_order) - self.blr = BLR(theta=theta, X=self.Phi, y=y, + Phi = create_poly_basis(X, self._model_order) + self.blr = BLR(theta=theta, X=Phi, y=y, warp=self.warp, **kwargs) else: self.blr = BLR(**kwargs) @@ -133,8 +148,7 @@ def estimate(self, X, y, **kwargs): # remove warp string to prevent it being passed to the blr object kwargs.pop('warp',None) - if not hasattr(self,'Phi'): - self.Phi = create_poly_basis(X, self._model_order) + Phi = create_poly_basis(X, self._model_order) if len(y.shape) > 1: y = y.ravel() @@ -142,34 +156,84 @@ def estimate(self, X, y, **kwargs): theta = self.theta0 # (re-)initialize BLR object because parameters were not specified - self.blr = BLR(theta=theta, X=self.Phi, y=y, + self.blr = BLR(theta=theta, X=Phi, y=y, var_groups=self.var_groups, warp=self.warp, **kwargs) - self.theta = self.blr.estimate(theta, self.Phi, y, - optimizer=self.optim_alg) + self.theta = self.blr.estimate(theta, Phi, y, + var_covariates=self.var_covariates, **kwargs) return self def predict(self, Xs, X=None, y=None, **kwargs): theta = self.theta # always use the estimated coefficients - # remove from kwargs + # remove from kwargs to avoid downstream problems kwargs.pop('theta', None) + + Phis = create_poly_basis(Xs, self._model_order) - if 'var_groups_test' in kwargs: - var_groups_test_file = kwargs.pop('var_groups_test') + if X is None: + Phi =None + else: + Phi = create_poly_basis(X, self._model_order) + + # process variance groups for the test data + if 'testvargroupfile' in kwargs: + var_groups_test_file = kwargs.pop('testvargroupfile') if var_groups_test_file.endswith('.pkl'): var_groups_te = pd.read_pickle(var_groups_test_file) else: var_groups_te = np.loadtxt(var_groups_test_file) else: var_groups_te = None + + # process test variance covariates + if 'testvarcovfile' in kwargs: + var_cov_test_file = kwargs.get('testvarcovfile') + if var_cov_test_file.endswith('.pkl'): + var_cov_te = pd.read_pickle(var_cov_test_file) + else: + var_cov_te = np.loadtxt(var_cov_test_file) + else: + var_cov_te = None + + # do we want to adjust the responses? 
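The adaptation keywords handled just below allow predictions from a pre-trained BLR model to be recalibrated to new sites at prediction time (via predict_and_adjust). A sketch of the corresponding call, assuming these keywords are forwarded from normative.predict through **kwargs as elsewhere in the toolkit; every file name is a placeholder::

    from pcntoolkit.normative import predict

    # cov_test/resp_test: the data to be predicted; the *_adapt files hold a
    # small calibration sample from the new site(s); the site-id files assign
    # each row to a site
    predict('cov_test.txt', 'resp_test.txt', alg='blr',
            model_path='Models', inputsuffix='estimate',
            adaptcovfile='cov_adapt.txt',
            adaptrespfile='resp_adapt.txt',
            adaptvargroupfile='site_ids_adapt.txt',
            testvargroupfile='site_ids_test.txt')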
+ if 'adaptrespfile' in kwargs: + y_adapt = fileio.load(kwargs.pop('adaptrespfile')) + if len(y_adapt.shape) == 1: + y_adapt = y_adapt[:, np.newaxis] + else: + y_adapt = None + + if 'adaptcovfile' in kwargs: + X_adapt = fileio.load(kwargs.pop('adaptcovfile')) + Phi_adapt = create_poly_basis(X_adapt, self._model_order) + else: + Phi_adapt = None + + if 'adaptvargroupfile' in kwargs: + var_groups_adapt_file = kwargs.pop('adaptvargroupfile') + if var_groups_adapt_file.endswith('.pkl'): + var_groups_ad = pd.read_pickle(var_groups_adapt_file) + else: + var_groups_ad = np.loadtxt(var_groups_adapt_file) + else: + var_groups_ad = None - yhat, s2 = self.blr.predict(theta, self.Phi, y, Phis, - var_groups_test=var_groups_te) + + if y_adapt is None: + yhat, s2 = self.blr.predict(theta, Phi, y, Phis, + var_groups_test=var_groups_te, + var_covariates_test=var_cov_te, + **kwargs) + else: + yhat, s2 = self.blr.predict_and_adjust(theta, Phi_adapt, y_adapt, Phis, + var_groups_test=var_groups_te, + var_groups_adapt=var_groups_ad, + **kwargs) return yhat, s2 \ No newline at end of file diff --git a/pcntoolkit/normative_model/norm_gpr.py b/pcntoolkit/normative_model/norm_gpr.py index 39a6f9c6..280adb25 100644 --- a/pcntoolkit/normative_model/norm_gpr.py +++ b/pcntoolkit/normative_model/norm_gpr.py @@ -16,7 +16,7 @@ sys.path.append(path) del path - from gp import GPR, CovSum + from model.gp import GPR, CovSum from norm_base import NormBase class NormGPR(NormBase): diff --git a/pcntoolkit/normative_model/norm_hbr.py b/pcntoolkit/normative_model/norm_hbr.py index ffbd7da1..ad60a508 100644 --- a/pcntoolkit/normative_model/norm_hbr.py +++ b/pcntoolkit/normative_model/norm_hbr.py @@ -13,10 +13,10 @@ import sys import numpy as np -try: # run as a package if installed - from pcntoolkit import fileio - from pcntoolkit.normative_model.normbase import NormBase - from pcntoolkit.hbr import HBR +try: + from pcntoolkit.dataio import fileio + from pcntoolkit.normative_model.norm_base import NormBase + from pcntoolkit.model.hbr import HBR except ImportError: pass @@ -24,10 +24,11 @@ if path not in sys.path: sys.path.append(path) del path - import fileio - from hbr import HBR + import dataio.fileio as fileio + from model.hbr import HBR from norm_base import NormBase + class NormHBR(NormBase): """ Classical GPR-based normative modelling approach """ diff --git a/pcntoolkit/normative_model/norm_np.py b/pcntoolkit/normative_model/norm_np.py index b6d9e4d8..29834d31 100644 --- a/pcntoolkit/normative_model/norm_np.py +++ b/pcntoolkit/normative_model/norm_np.py @@ -30,7 +30,7 @@ sys.path.append(path) del path - from NPR import NPR, np_loss + from model.NPR import NPR, np_loss from norm_base import NormBase class struct(object): diff --git a/pcntoolkit/normative_model/norm_rfa.py b/pcntoolkit/normative_model/norm_rfa.py index 0fae09a0..275ba4f1 100644 --- a/pcntoolkit/normative_model/norm_rfa.py +++ b/pcntoolkit/normative_model/norm_rfa.py @@ -16,7 +16,7 @@ sys.path.append(path) del path - from rfa import GPRRFA + from model.rfa import GPRRFA from norm_base import NormBase class NormRFA(NormBase): diff --git a/pcntoolkit/normative_parallel.py b/pcntoolkit/normative_parallel.py index 3b05b0d0..903ed6b6 100755 --- a/pcntoolkit/normative_parallel.py +++ b/pcntoolkit/normative_parallel.py @@ -33,17 +33,18 @@ try: import pcntoolkit as ptk - import pcntoolkit.fileio as fileio + import pcntoolkit.dataio.fileio as fileio from pcntoolkit import configs + ptkpath = ptk.__path__[0] except ImportError: pass - path = 
os.path.abspath(os.path.dirname(__file__)) - if path not in sys.path: - sys.path.append(path) - del path - import fileio + ptkpath = os.path.abspath(os.path.dirname(__file__)) + if ptkpath not in sys.path: + sys.path.append(ptkpath) + import dataio.fileio as fileio import configs + PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL @@ -96,7 +97,7 @@ def execute_nm(processing_dir, """ if normative_path is None: - normative_path = ptk.__path__[0] + '/normative.py' + normative_path = ptkpath + '/normative.py' cv_folds = kwargs.get('cv_folds', None) testcovfile_path = kwargs.get('testcovfile_path', None) @@ -492,6 +493,18 @@ def collect_nm(processing_dir, Z = pd.DataFrame(Z) fileio.save(Z, batch + 'Z' + outputsuffix + file_extentions) + + nll = np.zeros(batch_size) + nll = nll.transpose() + nll = pd.Series(nll) + fileio.save(nll, batch + 'NLL' + outputsuffix + + file_extentions) + + bic = np.zeros(batch_size) + bic = bic.transpose() + bic = pd.Series(bic) + fileio.save(bic, batch + 'BIC' + outputsuffix + + file_extentions) if not os.path.isdir(batch + 'Models'): os.mkdir('Models') @@ -626,6 +639,30 @@ def collect_nm(processing_dir, fileio.save(msll_dfs, processing_dir + 'MSLL' + outputsuffix + file_extentions) del msll_dfs + + nll_filenames = glob.glob(processing_dir + 'batch_*/' + 'NLL' + + outputsuffix + '*') + if nll_filenames: + nll_filenames = fileio.sort_nicely(nll_filenames) + nll_dfs = [] + for nll_filename in nll_filenames: + nll_dfs.append(pd.DataFrame(fileio.load(nll_filename))) + nll_dfs = pd.concat(nll_dfs, ignore_index=True, axis=0) + fileio.save(nll_dfs, processing_dir + 'NLL' + outputsuffix + + file_extentions) + del nll_dfs + + bic_filenames = glob.glob(processing_dir + 'batch_*/' + 'BIC' + + outputsuffix + '*') + if bic_filenames: + bic_filenames = fileio.sort_nicely(bic_filenames) + bic_dfs = [] + for bic_filename in bic_filenames: + bic_dfs.append(pd.DataFrame(fileio.load(bic_filename))) + bic_dfs = pd.concat(bic_dfs, ignore_index=True, axis=0) + fileio.save(bic_dfs, processing_dir + 'BIC' + outputsuffix + + file_extentions) + del bic_dfs if func != 'predict' and func != 'extend': if not os.path.isdir(processing_dir + 'Models') and \ @@ -807,8 +844,8 @@ def bashwrap_nm(processing_dir, job_call = [job_call[0] + ' -x ' + str(configparam)] # add standardization flag if it is false - if not standardize: - job_call = [job_call[0] + ' -s'] + # if not standardize: + # job_call = [job_call[0] + ' -s'] # add responses file job_call = [job_call[0] + ' ' + respfile_path] @@ -1008,8 +1045,8 @@ def sbatchwrap_nm(processing_dir, job_call = [job_call[0] + ' -x ' + str(configparam)] # add standardization flag if it is false - if not standardize: - job_call = [job_call[0] + ' -s'] + # if not standardize: + # job_call = [job_call[0] + ' -s'] # add responses file job_call = [job_call[0] + ' ' + respfile_path] diff --git a/pcntoolkit/trendsurf.py b/pcntoolkit/trendsurf.py index 4de0c1db..e0f0d6e5 100644 --- a/pcntoolkit/trendsurf.py +++ b/pcntoolkit/trendsurf.py @@ -17,8 +17,8 @@ import argparse try: # Run as a package if installed - from pcntoolkit import fileio - from pcntoolkit.bayesreg import BLR + from pcntoolkit.dataio import fileio + from pcntoolkit.model.bayesreg import BLR except ImportError: pass path = os.path.abspath(os.path.dirname(__file__)) @@ -26,8 +26,8 @@ sys.path.append(path) del path - import fileio - from bayesreg import BLR + from dataio import fileio + from model.bayesreg import BLR diff --git a/pcntoolkit/util/__init__.py b/pcntoolkit/util/__init__.py new file mode 100644 index 
00000000..9f9161bf --- /dev/null +++ b/pcntoolkit/util/__init__.py @@ -0,0 +1 @@ +from . import utils \ No newline at end of file diff --git a/pcntoolkit/utils.py b/pcntoolkit/util/utils.py similarity index 77% rename from pcntoolkit/utils.py rename to pcntoolkit/util/utils.py index 4be22202..1e766411 100644 --- a/pcntoolkit/utils.py +++ b/pcntoolkit/util/utils.py @@ -16,6 +16,8 @@ from sklearn.datasets import make_regression import pymc3 as pm from io import StringIO +import subprocess +import re try: # run as a package if installed from pcntoolkit import configs @@ -23,9 +25,10 @@ pass path = os.path.abspath(os.path.dirname(__file__)) - if path not in sys.path: - sys.path.append(path) - del path + rootpath = os.path.dirname(path) # parent directory + if rootpath not in sys.path: + sys.path.append(rootpath) + del path, rootpath import configs PICKLE_PROTOCOL = configs.PICKLE_PROTOCOL @@ -59,6 +62,86 @@ def create_bspline_basis(xmin, xmax, p = 3, nknots = 5): B = bspline.Bspline(k, p) return B +def create_design_matrix(X, intercept = True, basis = 'bspline', + basis_column = 0, site_ids=None, all_sites=None, + **kwargs): + """ Prepare a design matrix from a set of covariates sutiable for + running Bayesian linar regression. This design matrix consists of + a set of user defined covariates, optoinal site intercepts + (fixed effects) and also optionally a nonlinear basis expansion over + one of the columns + + :param X: matrix of covariates + :param basis: type of basis expansion to use + :param basis_column: which colume to perform the expansion over? + :param site_ids: list of site ids (one per data point) + :param all_sites: list of unique site ids + :param p: order of spline (3 = cubic) + :param nknots: number of knots (endpoints only counted once) + + if site_ids is specified, this must have the same number of entries as + there are rows in X. If all_sites is specfied, these will be used to + create the site identifiers in place of site_ids. This accommocdates + the scenario where not all the sites used to create the model are + present in the test set (i.e. 
there will be some empty site columns) + """ + + xmin = kwargs.pop('xmin', 0) + xmax = kwargs.pop('xmax', 100) + + N = X.shape[0] + + if type(X) is pd.DataFrame: + X = X.to_numpy() + + # add intercept column + if intercept: + Phi = np.concatenate((np.ones((N, 1)), X), axis=1) + else: + Phi = X + + # add dummy coded site columns + if all_sites is None: + if site_ids is not None: + all_sites = sorted(pd.unique(site_ids)) + + if site_ids is None: + if all_sites is None: + site_cols = None + else: + # site ids are not specified, but all_sites are + site_cols = np.zeros((N, len(all_sites))) + else: + # site ids are defined + # make sure the data are in pandas format + if type(site_ids) is not pd.Series: + site_ids = pd.Series(data=site_ids) + #site_ids = pd.Series(data=site_ids) + + # make sure all_sites is defined + if all_sites is None: + all_sites = sorted(pd.unique(site_ids)) + + # dummy code the sites + site_cols = np.zeros((N, len(all_sites))) + for i, s in enumerate(all_sites): + site_cols[:, i] = site_ids == s + + if site_cols.shape[0] != N: + raise ValueError('site cols must have the same number of rows as X') + + if site_cols is not None: + Phi = np.concatenate((Phi, site_cols), axis=1) + + # create Bspline basis set + if basis == 'bspline': + B = create_bspline_basis(xmin, xmax, **kwargs) + Phi = np.concatenate((Phi, np.array([B(i) for i in X[:,basis_column]])), axis=1) + elif basis == 'poly': + Phi = np.concatenate(Phi, create_poly_basis(X[:,basis_column], **kwargs)) + + return Phi + def squared_dist(x, z=None): """ compute sum((x-z) ** 2) for all vectors in a 2d array""" @@ -340,12 +423,13 @@ class WarpSinArcsinh(WarpBase): Using the parametrisation of Rios et al, Neural Networks 118 (2017) where a controls skew and b controls kurtosis, such that: - a = 0 : symmetric - a > 0 : positive skew - a < 0 : negative skew - b = 1 : mesokurtic - b > 1 : leptokurtic - b < 1 : platykurtic + + * a = 0 : symmetric + * a > 0 : positive skew + * a < 0 : negative skew + * b = 1 : mesokurtic + * b > 1 : leptokurtic + * b < 1 : platykurtic where b > 0. However, it is more convenentent to use an alternative parameterisation, where @@ -792,25 +876,26 @@ def load_freesurfer_measure(measure, data_path, subjects_list): This is a utility function to load different Freesurfer measures in a pandas Dataframe. - Inputs: - - measure: a string that defines the type of Freesurfer measure we want - to load. The options include: - - 'NumVert': Number of Vertices in each cortical area based on Destrieux atlas. - - 'SurfArea: Surface area for each cortical area based on Destrieux atlas. - - 'GrayVol': Gary matter volume in each cortical area based on Destrieux atlas. - - 'ThickAvg': Average Cortical thinckness in each cortical area based on Destrieux atlas. - - 'ThickStd': STD of Cortical thinckness in each cortical area based on Destrieux atlas. - - 'MeanCurv': Integrated Rectified Mean Curvature in each cortical area based on Destrieux atlas. - - 'GausCurv': Integrated Rectified Gaussian Curvature in each cortical area based on Destrieux atlas. - - 'FoldInd': Folding Index in each cortical area based on Destrieux atlas. - - 'CurvInd': Intrinsic Curvature Index in each cortical area based on Destrieux atlas. - - 'brain': Brain Segmentation Statistics from aseg.stats file. - - 'subcortical_volumes': Subcortical areas volume. - - - data_path: a string that specifies the path to the main Freesurfer folder. + Inputs + + :param measure: a string that defines the type of Freesurfer measure we want to load. 
\ + The options include: + + * 'NumVert': Number of Vertices in each cortical area based on Destrieux atlas. + * 'SurfArea: Surface area for each cortical area based on Destrieux atlas. + * 'GrayVol': Gary matter volume in each cortical area based on Destrieux atlas. + * 'ThickAvg': Average Cortical thinckness in each cortical area based on Destrieux atlas. + * 'ThickStd': STD of Cortical thinckness in each cortical area based on Destrieux atlas. + * 'MeanCurv': Integrated Rectified Mean Curvature in each cortical area based on Destrieux atlas. + * 'GausCurv': Integrated Rectified Gaussian Curvature in each cortical area based on Destrieux atlas. + * 'FoldInd': Folding Index in each cortical area based on Destrieux atlas. + * 'CurvInd': Intrinsic Curvature Index in each cortical area based on Destrieux atlas. + * 'brain': Brain Segmentation Statistics from aseg.stats file. + * 'subcortical_volumes': Subcortical areas volume. - - subjects_list: A Pythin list containing the list of subject names to load the data for. - The subject names should match the folder name for each subject's Freesurfer data folder. + :param data_path: a string that specifies the path to the main Freesurfer folder. + :param subjects_list: A Pythin list containing the list of subject names to load the data for. \ + The subject names should match the folder name for each subject's Freesurfer data folder. Outputs: - df: A pandas datafrmae containing the subject names as Index and target Freesurfer measures. @@ -996,4 +1081,115 @@ def fit_transform(self, X, adjust_outliers=False): X[X < 0] = 0 X[X > 1] = 1 - return X \ No newline at end of file + return X + + + +def retrieve_freesurfer_eulernum(freesurfer_dir, subjects=None, save_path=None): + + ''' + This function receives the freesurfer directory (including processed data + for several subjects) and retrieves the Euler number from the log files. If + the log file does not exist, this function uses 'mris_euler_number' to recompute + the Euler numbers (ENs). The function returns the ENs in a dataframe and + the list of missing subjects (that for which computing EN is failed). If + 'save_path' is specified then the results will be saved in a pickle file. + + Basic usage:: + + ENs, missing_subjects = retrieve_freesurfer_eulernum(freesurfer_dir) + + where the arguments are defined below. + + :param freesurfer_dir: absolute path to the Freesurfer directory. + :param subjects: List of subject that we want to retrieve the ENs for. + If it is 'None' (the default), the list of the subjects will be automatically + retreived from existing directories in the 'freesurfer_dir' (i.e. the ENs + for all subjects will be retrieved). + :param save_path: The path to save the results. If 'None' (default) the + results are not saves on the disk. + + + :outputs: * ENs - A dataframe of retrieved ENs. + * missing_subjects - The list of missing subjects. + + Developed by S.M. 
Kia + + ''' + + if subjects is None: + subjects = [temp for temp in os.listdir(freesurfer_dir) + if os.path.isdir(os.path.join(freesurfer_dir ,temp))] + + df = pd.DataFrame(index=subjects, columns=['lh_en','rh_en','avg_en']) + missing_subjects = [] + + for s, sub in enumerate(subjects): + sub_dir = os.path.join(freesurfer_dir, sub) + log_file = os.path.join(sub_dir, 'scripts', 'recon-all.log') + + if os.path.exists(sub_dir): + if os.path.exists(log_file): + with open(log_file) as f: + for line in f: + # find the part that refers to the EC + if re.search('orig.nofix lheno', line): + eno_line = line + f.close() + eno_l = eno_line.split()[3][0:-1] # remove the trailing comma + eno_r = eno_line.split()[6] + euler = (float(eno_l) + float(eno_r)) / 2 + + df.at[sub, 'lh_en'] = eno_l + df.at[sub, 'rh_en'] = eno_r + df.at[sub, 'avg_en'] = euler + + print('%d: Subject %s is successfully processed. EN = %f' + %(s, sub, df.at[sub, 'avg_en'])) + else: + print('%d: Subject %s is missing log file, running QC ...' %(s, sub)) + try: + bashCommand = 'mris_euler_number '+ freesurfer_dir + sub +'/surf/lh.orig.nofix>' + 'temp_l.txt 2>&1' + res = subprocess.run(bashCommand, stdout=subprocess.PIPE, shell=True) + file = open('temp_l.txt', mode = 'r', encoding = 'utf-8-sig') + lines = file.readlines() + file.close() + words = [] + for line in lines: + line = line.strip() + words.append([item.strip() for item in line.split(' ')]) + eno_l = np.float32(words[0][12]) + + bashCommand = 'mris_euler_number '+ freesurfer_dir + sub +'/surf/rh.orig.nofix>' + 'temp_r.txt 2>&1' + res = subprocess.run(bashCommand, stdout=subprocess.PIPE, shell=True) + file = open('temp_r.txt', mode = 'r', encoding = 'utf-8-sig') + lines = file.readlines() + file.close() + words = [] + for line in lines: + line = line.strip() + words.append([item.strip() for item in line.split(' ')]) + eno_r = np.float32(words[0][12]) + + df.at[sub, 'lh_en'] = eno_l + df.at[sub, 'rh_en'] = eno_r + df.at[sub, 'avg_en'] = (eno_r + eno_l) / 2 + + print('%d: Subject %s is successfully processed. EN = %f' + %(s, sub, df.at[sub, 'avg_en'])) + + except: + e = sys.exc_info()[0] + missing_subjects.append(sub) + print('%d: QC is failed for subject %s: %s.' %(s, sub, e)) + + else: + missing_subjects.append(sub) + print('%d: Subject %s is missing.' 
%(s, sub)) + df = df.dropna() + + if save_path is not None: + with open(save_path, 'wb') as file: + pickle.dump({'ENs':df}, file) + + return df, missing_subjects diff --git a/setup.py b/setup.py index 467fb277..90937629 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ from setuptools import setup, find_packages setup(name='pcntoolkit', - version='0.19', + version='0.20', description='Predictive Clinical Neuroscience toolkit', url='http://github.com/amarquand/nispat', author='Andre Marquand', @@ -10,15 +10,18 @@ packages=find_packages(), install_requires=[ 'argparse', - 'nibabel', + 'nibabel>=2.5.1', 'six', 'sklearn', 'bspline', 'matplotlib', + 'numpy>=1.19.5', + 'scipy>=1.3.2', 'pandas>=0.25.3', 'torch>=1.1.0', - 'pymc3==3.8', - 'Theano==1.0.5', + 'sphinx-tabs', + 'pymc3>=3.8,<=3.9.3', + 'theano==1.0.5', 'arviz==0.11.0' ], zip_safe=False) diff --git a/tests/testHBR.py b/tests/testHBR.py index 446e8514..c5a33d57 100644 --- a/tests/testHBR.py +++ b/tests/testHBR.py @@ -9,7 +9,7 @@ import os import numpy as np from pcntoolkit.normative_model.norm_utils import norm_init -from pcntoolkit.utils import simulate_data +from pcntoolkit.util.utils import simulate_data import matplotlib.pyplot as plt from pcntoolkit.normative import estimate from warnings import filterwarnings diff --git a/tests/test_blr.py b/tests/test_blr.py index 70c3b6a5..2f88e2bd 100644 --- a/tests/test_blr.py +++ b/tests/test_blr.py @@ -1,13 +1,13 @@ import sys -sys.path.append('/home/preclineu/andmar/sfw/PCNtoolkit/pcntoolkit') +#sys.path.append('/home/preclineu/andmar/sfw/PCNtoolkit/pcntoolkit') import numpy as np import scipy as sp from matplotlib import pyplot as plt import bspline from bspline import splinelab -from bayesreg import BLR -from gp import GPR -from utils import WarpBoxCox, WarpAffine, WarpCompose, WarpSinArcsinh +from pcntoolkit.model.bayesreg import BLR +from pcntoolkit.model.gp import GPR +from pcntoolkit.util.utils import WarpBoxCox, WarpAffine, WarpCompose, WarpSinArcsinh print('First do a simple evaluation of B-splines regression...') @@ -45,7 +45,8 @@ hyp0 = np.zeros(2) #hyp0 = np.zeros(4) # use ARD -B = BLR(hyp0, Phi, y) +#B = BLR(hyp0, Phi, y) +B = BLR() hyp = B.estimate(hyp0, Phi, y, optimizer='powell') yhat,s2 = B.predict(hyp, Phi, y, Phis) @@ -71,10 +72,13 @@ Phix = X[:, np.newaxis] Phixs = Xs[:, np.newaxis] +Bw = BLR(warp=W) +#hyp0 = 0.1*np.ones(2+W.get_n_params()) +#hyp = Bw.estimate(hyp0, Phi, y, optimizer='powell') +#yhat, s2 = Bw.predict(hyp, Phi, y, Phis) hyp0 = 0.1*np.ones(2+W.get_n_params()) -Bw = BLR(hyp0, Phi, y, warp=W) -hyp = Bw.estimate(hyp0, Phi, y, optimizer='powell') -yhat, s2 = Bw.predict(hyp, Phi, y, Phis) +hyp = Bw.estimate(hyp0, Phi, y, optimizer='powell', var_covariates=Phix) +yhat, s2 = Bw.predict(hyp, Phi, y, Phis, var_covariates_test=Phixs) warp_param = hyp[1:W.get_n_params()+1] med, pr_int = W.warp_predictions(yhat, s2, warp_param) @@ -95,7 +99,31 @@ plt.title('estimated warping function') plt.show() -print("Estimate a model with heteroskedastic noise ...") +# estimate a model with heteroskedastic noise +print('demonstrate heteroskedastic noise...' ) +# generative model +b = [0.4, -0.01, 0.] 
# true regression coefficients +s2 = 0.1 # noise variance +y = Phip.dot(b) + Phip[:,0]*np.random.normal(size=N) +plt.scatter(X,y) + +# new version +Bh = BLR() +hyp0 = np.zeros(8) +hyp = Bh.estimate(hyp0, Phi, y, optimizer='l-bfgs-b', var_covariates=Phi, verbose=True) +yhat,s2 = Bh.predict(hyp, Phi, y, Phis, var_covariates_test=Phis) + +# old version +#Bh = BLR(hetero_noise=7) +#hyp0 = np.zeros(8) +#hyp = Bh.estimate(hyp0, Phi, y, optimizer='l-bfgs-b', hetero_noise=7, verbose=True) +#yhat,s2 = Bh.predict(hyp, Phi, y, Phis) + +print(hyp) +plt.fill_between(Xs, yhat-1.96*np.sqrt(s2), yhat+1.96*np.sqrt(s2), alpha = 0.2) +plt.show() + +print("Estimate a model with site-specific noise ...") # set up some indicator variables for the variance groups n_site = 3 idx = [] @@ -137,7 +165,7 @@ Phis = np.concatenate((Phis, site_te), axis=1) hyp0=np.zeros(4) -Bh = BLR(hyp0, Phi, y, var_groups=sids) +Bh = BLR(var_groups=sids) Bh.loglik(hyp0, Phi, y) Bh.dloglik(hyp0, Phi, y) hyp = Bh.estimate(hyp0, Phi, y) @@ -148,7 +176,7 @@ plt.scatter(X[idx[s]], y[idx[s]]) plt.plot(Xs[idx_te[s]],yhat[idx_te[s]], color=cols[s]) plt.fill_between(Xs[idx_te[s]], - yhat[idx_te[s]] - 1.96 * np.sqrt(s2[idx_te[s]]), - yhat[idx_te[s]] + 1.96 * np.sqrt(s2[idx_te[s]]), - alpha=0.2, color=cols[s]) + yhat[idx_te[s]] - 1.96 * np.sqrt(s2[idx_te[s]]), + yhat[idx_te[s]] + 1.96 * np.sqrt(s2[idx_te[s]]), + alpha=0.2, color=cols[s]) plt.show() diff --git a/tests/test_gpr.py b/tests/test_gpr.py index 7ef2e188..179a7768 100644 --- a/tests/test_gpr.py +++ b/tests/test_gpr.py @@ -10,7 +10,7 @@ # load as a module sys.path.append('/home/mrstats/andmar/sfw/PCNtoolkit/pcntoolkit') -from gp import GPR, CovSqExp, CovSqExpARD, CovLin +from model.gp import GPR, CovSqExp, CovSqExpARD, CovLin # load from the installed package #from pcntoolkit.gp import GPR, covSqExp diff --git a/tests/test_rand_feat.py b/tests/test_rand_feat.py index b66e9cc8..601e5b49 100644 --- a/tests/test_rand_feat.py +++ b/tests/test_rand_feat.py @@ -1,15 +1,15 @@ import sys import numpy as np import torch -from utils import create_poly_basis +from util.utils import create_poly_basis from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # load as a module sys.path.append('/home/mrstats/andmar/sfw/PCNtoolkit/pcntoolkit') -from gp import GPR, CovSqExp, CovSqExpARD, CovLin, CovSum -from bayesreg import BLR -from rfa import GPRRFA +from model.gp import GPR, CovSqExp, CovSqExpARD, CovLin, CovSum +from model.bayesreg import BLR +from model.rfa import GPRRFA def plot_dist(x, mean, lb, ub, color_mean=None, color_shading=None): # plot the shaded range of the confidence intervals @@ -93,7 +93,7 @@ def f(X): Phis = np.c_[Phis, Xs] hyp_blr = np.asarray([np.log(1/sn2_est), np.log(1)]) -B = BLR(hyp_blr, Phi, y) +B = BLR()#hyp_blr, Phi, y) B.loglik(hyp_blr, Phi, y) yhat_blr, s2_blr = B.predict(hyp_blr, Phi, y, Phis) diff --git a/tests/unit_tests.py b/tests/unit_tests.py index eb5e9fe7..4adae936 100644 --- a/tests/unit_tests.py +++ b/tests/unit_tests.py @@ -13,7 +13,7 @@ from pcntoolkit.normative_parallel import execute_nm, collect_nm, delete_nm ## 2. 
by appending to the path -##sys.path.clear() +#sys.path.clear() #sys.path.append('/home/preclineu/andmar/sfw/PCNtoolkit/pcntoolkit') #from normative import estimate #from normative_parallel import execute_nm, collect_nm, delete_nm @@ -22,7 +22,7 @@ # General config parameters normative_path = '/home/preclineu/andmar/sfw/PCNtoolkit/pcntoolkit/normative.py' -python_path='/home/preclineu/andmar/sfw/anaconda3/envs/py36/bin/python' +python_path='/home/preclineu/andmar/sfw/anaconda3/envs/py38/bin/python' data_dir = '/home/preclineu/andmar/data/nispat_unit_test_data/' test_dir = '/home/preclineu/andmar/py.sandbox/unittests/unit_test_results' alt_alg = 'blr' # algorithm to test in addition to GPR
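For scripts written against version 0.19, the module moves in this patch amount to the following import changes (the new paths are exactly those used in the updated files above)::

    # pcntoolkit >= 0.20 package layout
    from pcntoolkit.model.bayesreg import BLR        # was: from pcntoolkit.bayesreg import BLR
    from pcntoolkit.model.gp import GPR, CovSum      # was: from pcntoolkit.gp import GPR, CovSum
    from pcntoolkit.dataio import fileio             # was: from pcntoolkit import fileio
    from pcntoolkit.util.utils import simulate_data  # was: from pcntoolkit.utils import simulate_data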