+[docs]
+ def post(self, hyp, X, y, Xv=None):
""" Generic function to compute posterior distribution.
This function will save the posterior mean and precision matrix as
@@ -306,7 +317,10 @@
+[docs]
+ def predict(self, hyp, X, y, Xs,
var_groups_test=None,
var_covariates_test=None, **kwargs):
""" Function to make predictions from the model
@@ -595,7 +621,10 @@ Source code for bayesreg
return ys, s2
-
[docs] def predict_and_adjust(self, hyp, X, y, Xs=None,
+
+
+[docs]
+class CovBase(with_metaclass(ABCMeta)):
""" Base class for covariance functions.
All covariance functions must define the following methods::
@@ -148,7 +157,9 @@
+[docs]
+ @abstractmethod
def dcov(self, theta, x, i):
""" Return the derivative of the covariance function with respect to
- the i-th hyperparameter """
+[docs]
+ def cov(self, theta, x, z=None):
if not self.first_call and not theta and theta is not None:
self.first_call = True
if len(theta) > 0 and theta[0] is not None:
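Tying these pieces together, a hedged sketch of a covariance class following this interface (assuming, as the abstract methods above suggest, that subclasses provide ``get_n_params``, ``cov`` and ``dcov``; the toy linear kernel is purely illustrative)::

    import numpy as np

    class CovLinear(CovBase):
        """ Toy linear covariance k(x, z) = x @ z.T with no hyperparameters """

        def get_n_params(self):
            return 0                      # no hyperparameters for this kernel

        def cov(self, theta, x, z=None):
            if z is None:
                z = x
            return x.dot(z.T)             # covariance between x and z

        def dcov(self, theta, x, i):
            # nothing to differentiate, since there are no hyperparameters
            raise ValueError("invalid covariance hyperparameter index")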
@@ -186,11 +209,18 @@
+[docs]
+class CovSum(CovBase):
""" Sum of covariance functions. These are passed in as a cell array and
initialised automatically. For example::
@@ -303,7 +351,9 @@
+[docs]
+ def predict(self, hyp, X, y, Xs):
""" Function to make predictions from the model
"""
if len(hyp.shape) > 1: # force 1d hyperparameter array
@@ -591,7 +662,9 @@
+[docs]
+def load_response_vars(datafile, maskfile=None, vol=True):
"""
Load response variables from file. This will load the data and mask it if
necessary. If the data is in ascii format it will be converted into a numpy
@@ -181,7 +207,10 @@
+[docs]
+def evaluate(Y, Yhat, S2=None, mY=None, sY=None, nlZ=None, nm=None, Xz_tr=None, alg=None,
metrics=['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL']):
''' Compute error metrics
This function will compute error metrics based on a set of predictions Yhat
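A hedged usage sketch with synthetic data (only ``Y`` and ``Yhat`` are strictly required; the example assumes the return value is a dictionary keyed by metric name and omits MSLL, which additionally needs ``mY`` and ``sY``)::

    import numpy as np
    from pcntoolkit.normative import evaluate   # module path assumed

    Y = np.random.randn(50, 3)                  # observed responses (subjects x variables)
    Yhat = Y + 0.1 * np.random.randn(50, 3)     # predictions
    S2 = 0.01 * np.ones_like(Y)                 # predictive variances

    results = evaluate(Y, Yhat, S2=S2, metrics=['Rho', 'RMSE', 'SMSE'])
    print(results['RMSE'])                      # one value per response variable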
@@ -349,7 +371,10 @@
+[docs]
+def save_results(respfile, Yhat, S2, maskvol, Z=None, Y=None, outputsuffix=None,
results=None, save_path=''):
"""
Writes the results of the normative model to disk.
@@ -406,7 +431,10 @@
+[docs]
+def estimate(covfile, respfile, **kwargs):
""" Estimate a normative model
This will estimate a model in one of two settings according to
@@ -474,7 +502,9 @@ Source code for normative
# '_' is in the outputsuffix to
# avoid file name parsing problem.
inscaler = kwargs.pop('inscaler', 'None')
+ print(f"inscaler: {inscaler}")
outscaler = kwargs.pop('outscaler', 'None')
+ print(f"outscaler: {outscaler}")
warp = kwargs.get('warp', None)
# convert from strings if necessary
@@ -627,7 +657,8 @@ Source code for normative
if warp is not None:
# TODO: Warping for scaled data
if outscaler is not None and outscaler != 'None':
- raise ValueError("outscaler not yet supported warping")
+ raise ValueError(
+ "outscaler not yet supported warping")
warp_param = nm.blr.hyp[1:nm.blr.warp.get_n_params()+1]
Ywarp[ts, nz[i]] = nm.blr.warp.f(
Y[ts, nz[i]], warp_param)
@@ -730,7 +761,10 @@
+[docs]
+def predict(covfile, respfile, maskfile=None, **kwargs):
'''
Make predictions on the basis of a pre-estimated normative model
If only the covariates are specified then only predicted mean and variance
@@ -905,6 +944,10 @@ Source code for normative
X = fileio.load(covfile)
if len(X.shape) == 1:
X = X[:, np.newaxis]
+ if respfile is not None:
+ Y, maskvol = load_response_vars(respfile, maskfile)
+ if len(Y.shape) == 1:
+ Y = Y[:, np.newaxis]
sample_num = X.shape[0]
if models is not None:
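Viewed from user code, a hedged sketch of calling this function (paths are placeholders; the example assumes that predictions, predictive variances and deviation scores are returned when a response file is supplied)::

    from pcntoolkit.normative import predict

    yhat, s2, z = predict('cov_test.txt',
                          'resp_test.txt',
                          alg='blr',
                          model_path='Models',        # directory holding the pre-estimated model
                          outputsuffix='_predict')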
@@ -922,9 +965,13 @@ Source code for normative
Xz = scaler_cov[fold].transform(X)
else:
Xz = X
+ if respfile is not None:
+ if outscaler in ['standardize', 'minmax', 'robminmax']:
+ Yz = scaler_resp[fold].transform(Y)
+ else:
+ Yz = Y
# estimate the models for all variables
- # TODO Z-scores adaptation for SHASH HBR
for i, m in enumerate(models):
print("Prediction by model ", i+1, "of", feature_num)
nm = norm_init(Xz)
@@ -947,6 +994,10 @@ Source code for normative
else:
Yhat[:, i] = yhat.squeeze()
S2[:, i] = s2.squeeze()
+ if respfile is not None:
+ if alg == 'hbr':
+ # Z scores for HBR must be computed independently for each model
+ Z[:, i] = nm.get_mcmc_zscores(Xz, Yz[:, i:i+1], **kwargs)
if respfile is None:
save_results(None, Yhat, S2, None, outputsuffix=outputsuffix)
@@ -954,7 +1005,6 @@ Source code for normative
return (Yhat, S2)
else:
- Y, maskvol = load_response_vars(respfile, maskfile)
if models is not None and len(Y.shape) > 1:
Y = Y[:, models]
if meta_data:
@@ -986,7 +1036,9 @@ Source code for normative
else:
warp = False
- Z = (Y - Yhat) / np.sqrt(S2)
+ if alg != 'hbr':
+ # For HBR the Z scores are already computed
+ Z = (Y - Yhat) / np.sqrt(S2)
print("Evaluating the model ...")
if meta_data and not warp:
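As a concrete illustration of the deviation score computed above (the numbers are arbitrary)::

    import numpy as np

    Y, Yhat, S2 = 3.0, 2.0, 0.25
    Z = (Y - Yhat) / np.sqrt(S2)   # (3.0 - 2.0) / 0.5 = 2.0
    # the observation lies two predictive standard deviations above the normative mean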
@@ -1008,7 +1060,10 @@
+[docs]
+def transfer(covfile, respfile, testcov=None, testresp=None, maskfile=None,
**kwargs):
'''
Transfer learning on the basis of a pre-estimated normative model by using
@@ -1044,14 +1099,14 @@ Source code for normative
return
# testing should not be obligatory for HBR,
# but should be for BLR (since it doesn't produce transfer models)
- elif (not 'model_path' in list(kwargs.keys())) or \
- (not 'trbefile' in list(kwargs.keys())):
+ elif ('model_path' not in list(kwargs.keys())) or \
+ ('trbefile' not in list(kwargs.keys())):
print(f'{kwargs=}')
print('InputError: Some general mandatory arguments are missing.')
return
# hbr has one additional mandatory arguments
elif alg == 'hbr':
- if (not 'output_path' in list(kwargs.keys())):
+ if ('output_path' not in list(kwargs.keys())):
print('InputError: Some mandatory arguments for hbr are missing.')
return
else:
@@ -1063,7 +1118,7 @@ Source code for normative
# or (testresp==None)
elif alg == 'blr':
if (testcov == None) or \
- (not 'tsbefile' in list(kwargs.keys())):
+ ('tsbefile' not in list(kwargs.keys())):
print('InputError: Some mandatory arguments for blr are missing.')
return
# general arguments
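Putting the mandatory arguments above together, a hedged HBR transfer call might look like this (all file and directory names are placeholders)::

    from pcntoolkit.normative import transfer

    transfer('cov_adapt.txt', 'resp_adapt.txt',
             testcov='cov_test.txt',
             testresp='resp_test.txt',
             alg='hbr',
             model_path='Models',            # pre-estimated model directory
             output_path='Transfer',         # where the adapted models are written
             trbefile='adapt_batch.pkl',     # batch effects for the adaptation data
             tsbefile='test_batch.pkl')      # batch effects for the test data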
@@ -1266,7 +1321,10 @@
+[docs]
+def extend(covfile, respfile, maskfile=None, **kwargs):
'''
This function extends an existing HBR model with data from new sites/scanners.
@@ -1299,9 +1357,9 @@ Source code for normative
if alg != 'hbr':
print('Model extension is only possible for HBR models.')
return
- elif (not 'model_path' in list(kwargs.keys())) or \
- (not 'output_path' in list(kwargs.keys())) or \
- (not 'trbefile' in list(kwargs.keys())):
+ elif ('model_path' not in list(kwargs.keys())) or \
+ ('output_path' not in list(kwargs.keys())) or \
+ ('trbefile' not in list(kwargs.keys())):
print('InputError: Some mandatory arguments are missing.')
return
else:
@@ -1376,7 +1434,10 @@
+[docs]
+def tune(covfile, respfile, maskfile=None, **kwargs):
'''
This function tunes an existing HBR model with real data.
@@ -1410,9 +1471,9 @@ Source code for normative
if alg != 'hbr':
print('Model tuning is only possible for HBR models.')
return
- elif (not 'model_path' in list(kwargs.keys())) or \
- (not 'output_path' in list(kwargs.keys())) or \
- (not 'trbefile' in list(kwargs.keys())):
+ elif ('model_path' not in list(kwargs.keys())) or \
+ ('output_path' not in list(kwargs.keys())) or \
+ ('trbefile' not in list(kwargs.keys())):
print('InputError: Some mandatory arguments are missing.')
return
else:
@@ -1487,7 +1548,10 @@
+[docs]
+def merge(covfile=None, respfile=None, **kwargs):
'''
This function merges two existing HBR models into a single model.
@@ -1518,9 +1582,9 @@ Source code for normative
if alg != 'hbr':
print('Merging models is only possible for HBR models.')
return
- elif (not 'model_path1' in list(kwargs.keys())) or \
- (not 'model_path2' in list(kwargs.keys())) or \
- (not 'output_path' in list(kwargs.keys())):
+ elif ('model_path1' not in list(kwargs.keys())) or \
+ ('model_path2' not in list(kwargs.keys())) or \
+ ('output_path' not in list(kwargs.keys())):
print('InputError: Some mandatory arguments are missing.')
return
else:
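For completeness, a hedged call sketch for merging two HBR models (directory names are placeholders)::

    from pcntoolkit.normative import merge

    merge(alg='hbr',
          model_path1='Models_siteA',
          model_path2='Models_siteB',
          output_path='Models_merged')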
@@ -1591,7 +1655,10 @@
:param testrespfile_path: Full path to a .txt file that contains all test features
:param log_path: Path for saving log files
:param binary: If True uses binary format for response file otherwise it is text
+ :param cluster_spec: 'torque' for PBS Torque and 'slurm' for Slurm clusters.
:param interactive: If False (default) the user should manually
rerun the failed jobs or collect the results.
If 'auto' the job status are checked until all
@@ -211,10 +222,11 @@
kwargs.update({'batch_size': str(batch_size)})
job_ids = []
+ start_time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
+
for n in range(1, number_of_batches+1):
kwargs.update({'job_id': str(n)})
if testrespfile_path is not None:
@@ -266,9 +280,10 @@
+[docs]
+def delete_nm(processing_dir,
binary=False):
'''This function deletes all processing for normative modelling and just keeps the combined output.
@@ -971,11 +1015,14 @@
+
# all routines below are environment dependent and require adaptation in novel
# environments -> copy those routines and adapt them in accordance with your
# environment
-
respfile_path,
memory,
duration,
+ log_path,
func='estimate',
**kwargs):
'''This function wraps normative modelling into a bash script to run it
@@ -1202,14 +1264,15 @@
+[docs]
+def sbatch_nm(job_path):
'''This function submits a job.sh script to the slurm cluster using the sbatch
command.
Basic usage::
- sbatch_nm(job_path, log_path)
+ sbatch_nm(job_path)
:param job_path: Full path to the job.sh file
- :param log_path: The logs are currently stored in the working dir
- :outputs: Submission of the job to the (torque) cluster.
+ :outputs: Submission of the job to the slurm cluster.
written by (primarily) T Wolfers, (adapted) S Rutherford.
'''
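For illustration, a hedged sketch of what such a submission helper can look like (this is not the library's exact implementation; it only shows how ``sbatch`` can be invoked and the job id parsed from its output, consistent with the ``job_id = sbatch_nm(jobpath)`` usage in ``sbatchrerun_nm`` below)::

    import subprocess

    def sbatch_nm_sketch(job_path):
        """ Submit job_path with sbatch and return the Slurm job id as a string """
        out = subprocess.run(['sbatch', job_path],
                             capture_output=True, text=True, check=True)
        # sbatch prints e.g. "Submitted batch job 123456"
        return out.stdout.strip().split()[-1]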
@@ -1289,15 +1354,22 @@
+[docs]
+def sbatchrerun_nm(processing_dir,
memory,
duration,
new_memory=False,
new_duration=False,
binary=False,
+ interactive=False,
**kwargs):
'''This function reruns all failed batches in processing_dir after collect_nm has identified the failed batches.
@@ -1315,7 +1387,12 @@ Source code for normative_parallel
written by (primarily) T Wolfers, (adapted) S Rutherford.
'''
- log_path = kwargs.pop('log_path', None)
+
+ # log_path = kwargs.pop('log_path', None)
+
+ job_ids = []
+
+ start_time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
if binary:
file_extentions = '.pkl'
@@ -1329,11 +1406,12 @@ Source code for normative_parallel
with fileinput.FileInput(jobpath, inplace=True) as file:
for line in file:
print(line.replace(duration, new_duration), end='')
- if new_memory != False:
- with fileinput.FileInput(jobpath, inplace=True) as file:
- for line in file:
- print(line.replace(memory, new_memory), end='')
- sbatch_nm(jobpath, log_path)
+ if new_memory != False:
+ with fileinput.FileInput(jobpath, inplace=True) as file:
+ for line in file:
+ print(line.replace(memory, new_memory), end='')
+ job_id = sbatch_nm(jobpath)
+ job_ids.append(job_id)
else:
file_extentions = '.txt'
@@ -1347,73 +1425,124 @@ Source code for normative_parallel
with fileinput.FileInput(jobpath, inplace=True) as file:
for line in file:
print(line.replace(duration, new_duration), end='')
- if new_memory != False:
- with fileinput.FileInput(jobpath, inplace=True) as file:
- for line in file:
- print(line.replace(memory, new_memory), end='')
- sbatch_nm(jobpath,
- log_path)
+ if new_memory != False:
+ with fileinput.FileInput(jobpath, inplace=True) as file:
+ for line in file:
+ print(line.replace(memory, new_memory), end='')
+ job_id = sbatch_nm(jobpath)
+ job_ids.append(job_id)
+
+ if interactive:
+ check_jobs(job_ids, cluster_spec='slurm',
+ start_time=start_time, delay=60)
+[docs]
+def check_jobs(jobs, cluster_spec, start_time=None, delay=60):
"""
A utility function for checking the status of submitted jobs.
:param jobs: list of job ids.
+ :param cluster_spec: type of cluster, either 'torque' or 'slurm'.
:param delay: the delay (in sec) between two consecutive checks, defaults to 60.
"""
@@ -1421,11 +1550,12 @@ Source code for normative_parallel
n = len(jobs)
while (True):
- q, r, c, u = check_job_status(jobs)
+ q, r, c, u = check_job_status(jobs, cluster_spec, start_time)
if c == n:
print('All jobs are completed!')
break
time.sleep(delay)
+[docs]
+class GPRRFA:
"""Random Feature Approximation for Gaussian Process Regression
Estimation and prediction of Bayesian linear regression models
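For intuition, a hedged numpy sketch of the random-feature idea behind this approximation (an RBF kernel is approximated by inner products of randomised trigonometric features; the feature count and lengthscale are arbitrary, and this is not the class's actual implementation)::

    import numpy as np

    def random_features(X, n_features=100, lengthscale=1.0, seed=0):
        """ Map X (n x d) to features whose inner products approximate an RBF kernel """
        rng = np.random.default_rng(seed)
        Omega = rng.normal(scale=1.0 / lengthscale, size=(X.shape[1], n_features))
        proj = X @ Omega
        # stacking cos and sin gives an unbiased estimate of exp(-||x - z||^2 / (2 * lengthscale^2))
        return np.hstack([np.cos(proj), np.sin(proj)]) / np.sqrt(n_features)

    X = np.random.randn(5, 2)
    Phi = random_features(X)
    K_approx = Phi @ Phi.T    # approximates the RBF Gram matrix of X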
@@ -191,11 +200,16 @@
+[docs]
+ def post(self, hyp, X, y):
""" Generic function to compute posterior distribution.
This function will save the posterior mean and precision matrix as
@@ -240,7 +254,10 @@ Source code for rfa
if hasattr(self, '_iterations'):
self._iterations += 1
+[docs]
+ def dloglik(self, hyp, X, y):
""" Function to compute derivatives """
print("derivatives not available")
return
-
[docs] def estimate(self, hyp0, X, y, optimizer='lbfgs'):
+
+
+[docs]
+ def estimate(self, hyp0, X, y, optimizer='lbfgs'):
""" Function to estimate the model """
if type(hyp0) is torch.Tensor:
@@ -324,7 +347,10 @@
+[docs]
+ def predict(self, hyp, X, y, Xs):
""" Function to make predictions from the model """
X, y, hyp = self._numpy2torch(X, y, hyp)
@@ -347,7 +373,9 @@
+
# For running from the command line:
if __name__ == "__main__":
main(sys.argv[1:])
diff --git a/doc/build/html/_sources/pages/BLR_normativemodel_protocol.rst.txt b/doc/build/html/_sources/pages/BLR_normativemodel_protocol.rst.txt
index 479b5163..4c23523b 100644
--- a/doc/build/html/_sources/pages/BLR_normativemodel_protocol.rst.txt
+++ b/doc/build/html/_sources/pages/BLR_normativemodel_protocol.rst.txt
@@ -1,27 +1,22 @@
-.. title:: BLR tutorial
+`Predictive Clinical Neuroscience Toolkit `__
+======================================================================================
-Bayesian Linear Regression
+The Normative Modeling Framework for Computational Psychiatry Protocol
======================================================================
-The Normative Modeling Framework for Computational Psychiatry. Nature Protocols. https://www.nature.com/articles/s41596-022-00696-5.
+Using Bayesian Linear Regression and Multi-Site Cortical Thickness Data
+-----------------------------------------------------------------------
Created by `Saige Rutherford `__
-Using Multi-Site Cortical Thickness Data
-
-.. image:: https://colab.research.google.com/assets/colab-badge.svg
- :target: https://colab.research.google.com/github/predictive-clinical-neuroscience/PCNtoolkit-demo/blob/main/tutorials/BLR_protocol/BLR_normativemodel_protocol.ipynb
-
-
-.. figure:: ./blr_fig2.png
- :height: 400px
- :align: center
-
Data Preparation
----------------------------------------------
+================
Install necessary libraries & grab data files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------
+
+Step 1.
+~~~~~~~
Begin by cloning the GitHub repository using the following commands.
This repository contains the necessary code and example data. Then
@@ -33,33 +28,28 @@ your computer).
! git clone https://github.com/predictive-clinical-neuroscience/PCNtoolkit-demo.git
-
-.. parsed-literal::
-
- Cloning into 'PCNtoolkit-demo'...
- remote: Enumerating objects: 855, done.[K
- remote: Counting objects: 100% (855/855), done.[K
- remote: Compressing objects: 100% (737/737), done.[K
- remote: Total 855 (delta 278), reused 601 (delta 101), pack-reused 0[K
- Receiving objects: 100% (855/855), 18.07 MiB | 13.53 MiB/s, done.
- Resolving deltas: 100% (278/278), done.
-
-
.. code:: ipython3
import os
# set this path to the git cloned PCNtoolkit-demo repository --> Uncomment whichever line you need for either running on your own computer or on Google Colab.
- #os.chdir('/Users/PCNtoolkit-demo/tutorials/BLR_protocol') # if running on your own computer, use this line (change the path to match where you cloned the repository)
- os.chdir('/content/PCNtoolkit-demo/tutorials/BLR_protocol') # if running on Google Colab, use this line
+ #wdir = '/PCNtoolkit-demo' # if running on your own computer, use this line (change the path to match where you cloned the repository)
+ wdir ='/content/PCNtoolkit-demo' # if running on Google Colab, use this line
+
+ os.chdir(os.path.join(wdir,'tutorials','BLR_protocol'))
+
.. code:: ipython3
+ ! pip install nutpie
+ ! pip install pcntoolkit
! pip install -r requirements.txt
-
Prepare covariate data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------
+
+Step 2.
+~~~~~~~
The data set (downloaded in Step 1) includes a multi-site dataset from
the `Human Connectome Project Young Adult
@@ -88,8 +78,8 @@ depending on the research question.
.. code:: ipython3
# if running in Google colab, remove the "data/" folder from the path
- hcp = pd.read_csv('/content/PCNtoolkit-demo/data/HCP1200_age_gender.csv')
- ixi = pd.read_csv('/content/PCNtoolkit-demo/data/IXI_age_gender.csv')
+ hcp = pd.read_csv(os.path.join(wdir,'data','HCP1200_age_gender.csv'))
+ ixi = pd.read_csv(os.path.join(wdir,'data','IXI_age_gender.csv'))
.. code:: ipython3
@@ -98,8 +88,8 @@ depending on the research question.
.. parsed-literal::
- /usr/local/lib/python3.7/dist-packages/pandas/core/reshape/merge.py:1218: UserWarning: You are merging on int and float columns where the float values are not equal to their int representation
- UserWarning,
+ :1: UserWarning: You are merging on int and float columns where the float values are not equal to their int representation.
+ cov = pd.merge(hcp, ixi, on=["participant_id", "age", "sex", "site"], how='outer')
.. code:: ipython3
@@ -108,29 +98,348 @@ depending on the research question.
.. code:: ipython3
- sns.displot(cov, x="age", hue="site", multiple="stack", height=6)
+ sns.displot(cov, x="age", hue="site", multiple="stack", height=6);
+.. image:: BLR_normativemodel_protocol_files/BLR_normativemodel_protocol_16_0.png
-.. parsed-literal::
-
+.. code:: ipython3
+ cov.groupby(['site']).describe()
-.. image:: BLR_normativemodel_protocol_files/BLR_normativemodel_protocol_15_1.png
+.. parsed-literal::
+
+                  age                                                                         sex
+                count       mean        std        min        25%       50%        75%       max   count      mean       std  min  25%  50%  75%  max
+    site
+    hcp       1206.0  28.837479   3.690534  22.000000  26.000000  29.00000  32.000000  37.00000  1206.0  1.543947  0.498272  1.0  1.0  2.0  2.0  2.0
+    ixi        590.0  49.476531  16.720864  19.980835  34.027721  50.61191  63.413415  86.31896   590.0  1.555932  0.497283  1.0  1.0  2.0  2.0  2.0
-.. code:: ipython3
- cov.groupby(['site']).describe()
+Prepare brain data
+-------------------
-Prepare brain data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Step 3.
+~~~~~~~
Next, format and combine the MRI data using the following commands. The
example data contains cortical thickness maps estimated by running
@@ -139,7 +448,7 @@ was reduced by using ROIs from the Desikan-Killiany atlas. Including the
Euler number as a covariate is also recommended, as this is a proxy
metric for data quality. The `Euler
number `__ from
-each subjects recon-all output folder was extracted into a text file
+each subject’s recon-all output folder was extracted into a text file
and is merged into the cortical thickness data frame. The Euler number
is site-specific, thus, to use the same exclusion threshold across sites
it is important to center the site by subtracting the site median from
@@ -147,7 +456,7 @@ all subjects at a site. Then take the square root and multiply by
negative one and exclude any subjects with a square root above 10.
Here is some pseudo-code (run from a terminal in the folder that has all
-subjects recon-all output folders) that was used to extract these ROIs:
+subject’s recon-all output folders) that was used to extract these ROIs:
``export SUBJECTS_DIR=/path/to/study/freesurfer_data/``
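Below is a hedged pandas sketch of the site-median centring and square-root threshold described above (a literal reading of that description; ``df_euler`` and the ``avg_euler`` column follow the data frame assembled a few cells below, and the sign convention should be checked against the protocol code).

.. code:: ipython3

    import numpy as np

    # centre the Euler number within each site, apply the square-root transform,
    # and keep only subjects whose negated square-root value stays above -10
    site_median = df_euler.groupby('site')['avg_euler'].transform('median')
    df_euler['avg_euler_centered'] = df_euler['avg_euler'] - site_median
    df_euler['sqrt_euler'] = np.sqrt(np.abs(df_euler['avg_euler_centered'])) * -1
    df_euler_qc = df_euler[df_euler['sqrt_euler'] > -10]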
@@ -157,14 +466,14 @@ subjects recon-all output folders) that was used to extract these ROIs:
.. code:: ipython3
- hcpya = pd.read_csv('/content/PCNtoolkit-demo/data/HCP1200_aparc_thickness.csv')
- ixi = pd.read_csv('/content/PCNtoolkit-demo/data/IXI_aparc_thickness.csv')
+ hcpya = pd.read_csv(os.path.join(wdir,'data','HCP1200_aparc_thickness.csv'))
+ ixi = pd.read_csv(os.path.join(wdir,'data','IXI_aparc_thickness.csv'))
.. code:: ipython3
brain_all = pd.merge(ixi, hcpya, how='outer')
-We extracted the euler number from each subjects recon-all output
+We extracted the euler number from each subject’s recon-all output
folder into a text file and we now need to format and combine these into
our brain dataframe.
@@ -173,12 +482,12 @@ recon-all.log for each subject. Run this from the terminal in the folder
where your subjects' recon-all output folders are located. This assumes
that all of your subject IDs start with “sub-” prefix.
-:literal:`for i in sub-*; do if [[ -e ${i}/scripts/recon-all.log ]]; then cat ${i}/scripts/recon-all.log | grep -A 1 "Computing euler" > temp_log; lh_en=$(cat temp_log | head -2 | tail -1 | awk -F '=' '{print $2}' | awk -F ',' '{print $1}'); rh_en=$(cat temp_log | head -2 | tail -1 | awk -F '=' '{print $3}'); echo "${i}, ${lh_en}, ${rh_en}" >> euler.csv; echo ${i}; fi; done`
+:literal:`for i in sub-\*; do if [[ -e ${i}/scripts/recon-all.log ]]; then cat ${i}/scripts/recon-all.log | grep -A 1 "Computing euler" > temp_log; lh_en=`cat temp_log | head -2 | tail -1 | awk -F '=' '{print $2}' | awk -F ',' '{print $1}'\`; rh_en=`cat temp_log | head -2 | tail -1 | awk -F '=' '{print $3}'\`; echo "${i}, ${lh_en}, ${rh_en}" >> euler.csv; echo ${i}; fi; done`
.. code:: ipython3
- hcp_euler = pd.read_csv('/content/PCNtoolkit-demo/data/hcp-ya_euler.csv')
- ixi_euler = pd.read_csv('/content/PCNtoolkit-demo/data/ixi_euler.csv')
+ hcp_euler = pd.read_csv(os.path.join(wdir,'data','hcp-ya_euler.csv'))
+ ixi_euler = pd.read_csv(os.path.join(wdir,'data','ixi_euler.csv'))
.. code:: ipython3
@@ -221,7 +530,7 @@ inclusion is not too strict or too lenient.
.. code:: ipython3
- df_euler.groupby(by='site').median()
+ df_euler.groupby(by='site')[['lh_euler', 'rh_euler', 'avg_euler']].median()
@@ -229,9 +538,8 @@ inclusion is not too strict or too lenient.
.. raw:: html
-