From e055b5429ac1cd9f07d86988205991a0245ca34c Mon Sep 17 00:00:00 2001
From: Taco de Wolff
Date: Thu, 10 Dec 2020 15:26:09 +0100
Subject: [PATCH] Release 0.2.2

---
 docs/bnse.html                          |  42 ++---
 docs/data.html                          | 146 ++++++++++------
 docs/dataset.html                       |  70 ++++----
 docs/errors.html                        |  10 +-
 docs/{kernels => gpr}/config.html       | 110 ++++++++----
 docs/gpr/distribution.html              | 185 ++++++++++++++++++++
 docs/{kernels => gpr}/index.html        |  43 ++---
 docs/{kernels => gpr}/kernel.html       | 141 +++++++--------
 docs/{kernels => gpr}/mean.html         |  14 +-
 docs/{kernels => gpr}/model.html        | 148 ++++++++--------
 docs/{kernels => gpr}/multioutput.html  |  88 +++++-----
 docs/{kernels => gpr}/parameter.html    | 100 +++++------
 docs/{kernels => gpr}/singleoutput.html | 217 ++++++++++++++++++------
 docs/{kernels => gpr}/util.html         |   8 +-
 docs/index.html                         |  47 ++---
 docs/model.html                         |  71 ++++----
 docs/{ => models}/conv.html             |  82 ++++-----
 docs/{ => models}/csm.html              |  82 ++++-----
 docs/models/index.html                  |  87 ++++++++++
 docs/{ => models}/mosm.html             | 102 +++++------
 docs/{ => models}/sm.html               |  90 +++++-----
 docs/{ => models}/sm_lmc.html           |  82 ++++-----
 docs/plot.html                          |   4 +-
 docs/serie.html                         |  62 +++----
 setup.py                                |   4 +-
 25 files changed, 1248 insertions(+), 787 deletions(-)
 rename docs/{kernels => gpr}/config.html (60%)
 create mode 100644 docs/gpr/distribution.html
 rename docs/{kernels => gpr}/index.html (74%)
 rename docs/{kernels => gpr}/kernel.html (77%)
 rename docs/{kernels => gpr}/mean.html (91%)
 rename docs/{kernels => gpr}/model.html (83%)
 rename docs/{kernels => gpr}/multioutput.html (83%)
 rename docs/{kernels => gpr}/parameter.html (79%)
 rename docs/{kernels => gpr}/singleoutput.html (67%)
 rename docs/{kernels => gpr}/util.html (94%)
 rename docs/{ => models}/conv.html (86%)
 rename docs/{ => models}/csm.html (87%)
 create mode 100644 docs/models/index.html
 rename docs/{ => models}/mosm.html (89%)
 rename docs/{ => models}/sm.html (87%)
 rename docs/{ => models}/sm_lmc.html (87%)

diff --git a/docs/bnse.html b/docs/bnse.html
index f0805123..a6c2903f 100644
--- a/docs/bnse.html
+++ b/docs/bnse.html
@@ -25,7 +25,7 @@

Module mogptk.bnse

Expand source code -Browse git +Browse git
import numpy as np
 import matplotlib.pyplot as plt
@@ -271,7 +271,7 @@ 

Functions

Expand source code -Browse git +Browse git
def Spec_Mix(x,y, gamma, theta, sigma=1):
     return sigma**2 * np.exp(-gamma*outersum(x,-y)**2)*np.cos(2*np.pi*theta*outersum(x,-y))
@@ -285,7 +285,7 @@

Functions

Expand source code -Browse git +Browse git
def Spec_Mix_sine(x,y, gamma, theta, sigma=1):
     return sigma**2 * np.exp(-gamma*outersum(x,-y)**2)*np.sin(2*np.pi*theta*outersum(x,-y))
@@ -299,7 +299,7 @@

Functions

Expand source code -Browse git +Browse git
def Spec_Mix_spectral(x, y, alpha, gamma, theta, sigma=1):
     magnitude = np.pi * sigma**2 / (np.sqrt(alpha*(alpha + 2*gamma)))
@@ -314,7 +314,7 @@ 

Functions

Expand source code -Browse git +Browse git
def freq_covariances(x, y, alpha, gamma, theta, sigma=1, kernel = 'sm'):
     if kernel == 'sm':
@@ -335,7 +335,7 @@ 

Functions

Expand source code -Browse git +Browse git
def outersum(a,b):
     # equivalent to np.outer(a,np.ones_like(b))+np.outer(np.ones_like(a),b) when a and b are arrays
@@ -351,7 +351,7 @@ 

Functions

Expand source code -Browse git +Browse git
def time_freq_SM_im(x, y, alpha, gamma, theta, sigma=1):
     at = alpha/(np.pi**2)
@@ -368,7 +368,7 @@ 

Functions

Expand source code -Browse git +Browse git
def time_freq_SM_re(x, y, alpha, gamma, theta, sigma=1):
     at = alpha/(np.pi**2)
@@ -385,7 +385,7 @@ 

Functions

Expand source code -Browse git +Browse git
def time_freq_covariances(x, t, alpha, gamma, theta, sigma, kernel = 'sm'):
     if kernel == 'sm':
@@ -408,7 +408,7 @@ 

Classes

Expand source code -Browse git +Browse git
class bse:
     def __init__(self, space_input, space_output):
@@ -599,7 +599,7 @@ 

Methods

Expand source code -Browse git +Browse git
def compute_moments(self):
     #posterior moments for time
@@ -628,7 +628,7 @@ 

Methods

Expand source code -Browse git +Browse git
def dnlogp(self, hypers):
     sigma = np.exp(hypers[0])
@@ -662,7 +662,7 @@ 

Methods

Expand source code -Browse git +Browse git
def get_freq_peaks(self):
     x = self.w
@@ -691,7 +691,7 @@ 

Methods

Expand source code -Browse git +Browse git
def neg_log_likelihood(self):
     Y = self.y
@@ -709,7 +709,7 @@ 

Methods

Expand source code -Browse git +Browse git
def nlogp(self, hypers):
     sigma = np.exp(hypers[0])
@@ -732,7 +732,7 @@ 

Methods

Expand source code -Browse git +Browse git
def plot_freq_posterior(self):
     #posterior moments for frequency
@@ -767,7 +767,7 @@ 

Methods

Expand source code -Browse git +Browse git
def plot_power_spectral_density(self, how_many, flag=None):
     #posterior moments for frequency
@@ -800,7 +800,7 @@ 

Methods

Expand source code -Browse git +Browse git
def plot_time_posterior(self, flag=None):
     #posterior moments for time
@@ -828,7 +828,7 @@ 

Methods

Expand source code -Browse git +Browse git
def set_freqspace(self, max_freq, dimension=500):
     self.w = np.linspace(0, max_freq, dimension)
@@ -842,7 +842,7 @@

Methods

Expand source code -Browse git +Browse git
def set_labels(self, time_label, signal_label):
     self.time_label = time_label
@@ -857,7 +857,7 @@ 

Methods

Expand source code -Browse git +Browse git
def train(self):
     hypers0 = np.array([np.log(self.sigma), np.log(self.gamma), np.log(self.theta), np.log(self.sigma_n)])
diff --git a/docs/data.html b/docs/data.html
index c0e18379..bcb3cfeb 100644
--- a/docs/data.html
+++ b/docs/data.html
@@ -25,7 +25,7 @@ 

Module mogptk.data

Expand source code -Browse git +Browse git
import re
 import copy
@@ -548,6 +548,14 @@ 

Module mogptk.data

         return X, np.array(self.Y[~self.mask])

    ################################################################
+
+    def reset(self):
+        """
+        Reset the data set and undo the removal of data points. That is, this reverts any calls to `remove_randomly`, `remove_range`, `remove_relative_range`, `remove_random_ranges`, and `remove_index`.
+        """
+        self.mask[:] = True
+        for i in range(len(self.removed_ranges)):
+            self.removed_ranges[i] = []

     def remove_randomly(self, n=None, pct=None):
         """
@@ -1004,7 +1012,8 @@

Module mogptk.data

transformed (boolean): Display transformed Y data as used for training. Returns: - matplotlib.axes.Axes + matplotlib.figure.Figure: only if `ax` is not set + matplotlib.axes.Axes: only if `ax` is not set Examples: >>> ax = data.plot() @@ -1015,8 +1024,9 @@

Module mogptk.data

if self.get_input_dims() == 2: raise NotImplementedError("two dimensional input data not yet implemented") # TODO + fig = None if ax is None: - _, ax = plt.subplots(1, 1, figsize=(12, 3.0), squeeze=True, constrained_layout=True) + fig, ax = plt.subplots(1, 1, figsize=(12, 3.0), squeeze=True, constrained_layout=True) legends = [] colors = list(matplotlib.colors.TABLEAU_COLORS) @@ -1092,8 +1102,10 @@

Module mogptk.data

if legend: legend_rows = (len(legends)-1)/5 + 1 - ax.legend(handles=legends, loc="upper center", bbox_to_anchor=(0.5,(3.0+0.7+0.3*legend_rows)/3.0), ncol=5) - return ax + ax.legend(handles=legends, loc="upper center", bbox_to_anchor=(0.5,(3.0+0.5+0.3*legend_rows)/3.0), ncol=5) + + if fig is not None: + return fig, ax def plot_spectrum(self, title=None, method='ls', ax=None, per=None, maxfreq=None, transformed=False): """ @@ -1363,7 +1375,7 @@

Examples

Expand source code -Browse git +Browse git
def LoadFunction(f, start, end, n, var=0.0, name="", random=False):
     """
@@ -1489,7 +1501,7 @@ 

Examples

Expand source code -Browse git +Browse git
class Data:
     def __init__(self, X, Y, name=None, x_labels=None, y_label=None):
@@ -1896,6 +1908,14 @@ 

Examples

         return X, np.array(self.Y[~self.mask])

    ################################################################
+
+    def reset(self):
+        """
+        Reset the data set and undo the removal of data points. That is, this reverts any calls to `remove_randomly`, `remove_range`, `remove_relative_range`, `remove_random_ranges`, and `remove_index`.
+        """
+        self.mask[:] = True
+        for i in range(len(self.removed_ranges)):
+            self.removed_ranges[i] = []

     def remove_randomly(self, n=None, pct=None):
         """
@@ -2352,7 +2372,8 @@

Examples

transformed (boolean): Display transformed Y data as used for training. Returns: - matplotlib.axes.Axes + matplotlib.figure.Figure: only if `ax` is not set + matplotlib.axes.Axes: only if `ax` is not set Examples: >>> ax = data.plot() @@ -2363,8 +2384,9 @@

Examples

if self.get_input_dims() == 2: raise NotImplementedError("two dimensional input data not yet implemented") # TODO + fig = None if ax is None: - _, ax = plt.subplots(1, 1, figsize=(12, 3.0), squeeze=True, constrained_layout=True) + fig, ax = plt.subplots(1, 1, figsize=(12, 3.0), squeeze=True, constrained_layout=True) legends = [] colors = list(matplotlib.colors.TABLEAU_COLORS) @@ -2440,8 +2462,10 @@

Examples

if legend: legend_rows = (len(legends)-1)/5 + 1 - ax.legend(handles=legends, loc="upper center", bbox_to_anchor=(0.5,(3.0+0.7+0.3*legend_rows)/3.0), ncol=5) - return ax + ax.legend(handles=legends, loc="upper center", bbox_to_anchor=(0.5,(3.0+0.5+0.3*legend_rows)/3.0), ncol=5) + + if fig is not None: + return fig, ax def plot_spectrum(self, title=None, method='ls', ax=None, per=None, maxfreq=None, transformed=False): """ @@ -2574,7 +2598,7 @@

Examples

Expand source code -Browse git +Browse git
def aggregate(self, duration, f=np.mean):
     """
@@ -2617,7 +2641,7 @@ 

Examples

Expand source code -Browse git +Browse git
def clear_predictions(self):
     """
@@ -2643,7 +2667,7 @@ 

Examples

Expand source code -Browse git +Browse git
def copy(self):
     """
@@ -2678,7 +2702,7 @@ 

Examples

Expand source code -Browse git +Browse git
def filter(self, start, end):
     """
@@ -2728,7 +2752,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_bnse_estimation(self, Q=1, n=1000):
     """
@@ -2798,7 +2822,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_data(self, transformed=False):
     """
@@ -2839,7 +2863,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_input_dims(self):
     """
@@ -2877,7 +2901,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_lombscargle_estimation(self, Q=1, n=10000):
     """
@@ -2950,7 +2974,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_name(self):
     """
@@ -2979,7 +3003,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_nyquist_estimation(self):
     """
@@ -3027,7 +3051,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_prediction(self, name, sigma=2.0, transformed=False):
     """
@@ -3081,7 +3105,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_prediction_names(self):
     """
@@ -3110,7 +3134,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_prediction_x(self):
     """
@@ -3155,7 +3179,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_sm_estimation(self, Q=1, method='BNSE', optimizer='Adam', iters=100, params={}, plot=False):
     """
@@ -3224,7 +3248,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_test_data(self, transformed=False):
     """
@@ -3272,7 +3296,7 @@ 

Examples

Expand source code -Browse git +Browse git
def get_train_data(self, transformed=False):
     """
@@ -3313,7 +3337,7 @@ 

Examples

Expand source code -Browse git +Browse git
def has_test_data(self):
     """
@@ -3348,17 +3372,15 @@ 

Args

Display transformed Y data as used for training.

Returns

-
-
matplotlib.axes.Axes
-
 
-
+

matplotlib.figure.Figure: only if ax is not set +matplotlib.axes.Axes: only if ax is not set

Examples

>>> ax = data.plot()
 
Expand source code -Browse git +Browse git
def plot(self, pred=None, title=None, ax=None, legend=True, transformed=False):
     """
@@ -3372,7 +3394,8 @@ 

Examples

             transformed (boolean): Display transformed Y data as used for training.

         Returns:
-            matplotlib.axes.Axes
+            matplotlib.figure.Figure: only if `ax` is not set
+            matplotlib.axes.Axes: only if `ax` is not set

         Examples:
             >>> ax = data.plot()
@@ -3383,8 +3406,9 @@

Examples

         if self.get_input_dims() == 2:
             raise NotImplementedError("two dimensional input data not yet implemented")  # TODO

+        fig = None
         if ax is None:
-            _, ax = plt.subplots(1, 1, figsize=(12, 3.0), squeeze=True, constrained_layout=True)
+            fig, ax = plt.subplots(1, 1, figsize=(12, 3.0), squeeze=True, constrained_layout=True)

         legends = []
         colors = list(matplotlib.colors.TABLEAU_COLORS)
@@ -3460,8 +3484,10 @@

Examples

         if legend:
             legend_rows = (len(legends)-1)/5 + 1
-            ax.legend(handles=legends, loc="upper center", bbox_to_anchor=(0.5,(3.0+0.7+0.3*legend_rows)/3.0), ncol=5)
-        return ax
+            ax.legend(handles=legends, loc="upper center", bbox_to_anchor=(0.5,(3.0+0.5+0.3*legend_rows)/3.0), ncol=5)
+
+        if fig is not None:
+            return fig, ax
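As a quick illustration of the changed return value (a minimal sketch, assuming a mogptk.Data instance named `data`): when no `ax` is passed, `plot()` now creates the figure itself and returns both the Figure and the Axes; when an existing `ax` is passed it draws onto that Axes and returns nothing.

>>> import matplotlib.pyplot as plt
>>> fig, ax = data.plot(title='Raw data')   # no ax given: a new figure is created and returned
>>> fig.savefig('raw_data.png')             # the returned Figure can be saved directly
>>> _, ax2 = plt.subplots(1, 1)
>>> data.plot(ax=ax2)                       # plotting onto an existing Axes returns None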
@@ -3495,7 +3521,7 @@

Examples

Expand source code -Browse git +Browse git
def plot_spectrum(self, title=None, method='ls', ax=None, per=None, maxfreq=None, transformed=False):
     """
@@ -3593,7 +3619,7 @@ 

Args

Expand source code -Browse git +Browse git
def remove_index(self, index):
     """
@@ -3630,7 +3656,7 @@ 

Examples

Expand source code -Browse git +Browse git
def remove_random_ranges(self, n, duration):
     """
@@ -3686,7 +3712,7 @@ 

Examples

Expand source code -Browse git +Browse git
def remove_randomly(self, n=None, pct=None):
     """
@@ -3733,7 +3759,7 @@ 

Examples

Expand source code -Browse git +Browse git
def remove_range(self, start=None, end=None):
     """
@@ -3781,7 +3807,7 @@ 

Args

Expand source code -Browse git +Browse git
def remove_relative_range(self, start=0.0, end=1.0):
     """
@@ -3824,7 +3850,7 @@ 

Examples

Expand source code -Browse git +Browse git
def rescale_x(self, upper=1000.0):
     """
@@ -3846,6 +3872,25 @@ 

Examples

self.X[i].apply(t)
+
+def reset(self) +
+
+

Reset the data set and undo the removal of data points. That is, this reverts any calls to remove_randomly, remove_range, remove_relative_range, remove_random_ranges, and remove_index.

+
+ +Expand source code +Browse git + +
def reset(self):
+    """
+    Reset the data set and undo the removal of data points. That is, this reverts any calls to `remove_randomly`, `remove_range`, `remove_relative_range`, `remove_random_ranges`, and `remove_index`.
+    """
+    self.mask[:] = True
+    for i in range(len(self.removed_ranges)):
+        self.removed_ranges[i] = []
+
+
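A minimal usage sketch of the new `reset()` method (hypothetical `data` object; `remove_randomly` is the existing removal method documented on this page): removed points are only masked, so `reset()` restores the complete data set.

>>> data.remove_randomly(pct=0.4)   # randomly remove data points (they are only masked, not deleted)
>>> data.reset()                    # undo all removals; every point is used for training again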
def set_function(self, f)
@@ -3863,7 +3908,7 @@

Examples

Expand source code -Browse git +Browse git
def set_function(self, f):
     """
@@ -3899,7 +3944,7 @@ 

Examples

Expand source code -Browse git +Browse git
def set_labels(self, x_labels, y_label):
     """
@@ -3941,7 +3986,7 @@ 

Examples

Expand source code -Browse git +Browse git
def set_name(self, name):
     """
@@ -3983,7 +4028,7 @@ 

Examples

Expand source code -Browse git +Browse git
def set_prediction_range(self, start=None, end=None, n=None, step=None):
     """
@@ -4056,7 +4101,7 @@ 

Examples

Expand source code -Browse git +Browse git
def set_prediction_x(self, X):
     """
@@ -4107,7 +4152,7 @@ 

Examples

Expand source code -Browse git +Browse git
def transform(self, transformer):
     """
@@ -4185,6 +4230,7 @@ 

Dataremove_range
  • remove_relative_range
  • rescale_x
  • +
  • reset
  • set_function
  • set_labels
  • set_name
  • diff --git a/docs/dataset.html b/docs/dataset.html index 9c6a5052..a6d5c35d 100644 --- a/docs/dataset.html +++ b/docs/dataset.html @@ -25,7 +25,7 @@

    Module mogptk.dataset

    Expand source code -Browse git +Browse git
    import copy
     
    @@ -678,8 +678,8 @@ 

    Module mogptk.dataset

    legends = {} for channel in range(self.get_output_dims()): - ax = self.channels[channel].plot(pred=pred, ax=axes[channel,0], transformed=transformed) - legend = ax.get_legend() + self.channels[channel].plot(pred=pred, ax=axes[channel,0], transformed=transformed) + legend = axes[channel,0].get_legend() for text, handle in zip(legend.texts, legend.legendHandles): if text.get_text() == "Training Points": handle = plt.Line2D([0], [0], ls='-', color='k', marker='.', ms=10, label='Training Points') @@ -770,7 +770,7 @@

    Examples

    Expand source code -Browse git +Browse git
    def LoadCSV(filename, x_col=0, y_col=1, name=None, **kwargs):
         """
    @@ -827,7 +827,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def LoadDataFrame(df, x_col=0, y_col=1, name=None):
         """
    @@ -940,7 +940,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    class DataSet:
         """
    @@ -1490,8 +1490,8 @@ 

    Examples

    legends = {} for channel in range(self.get_output_dims()): - ax = self.channels[channel].plot(pred=pred, ax=axes[channel,0], transformed=transformed) - legend = ax.get_legend() + self.channels[channel].plot(pred=pred, ax=axes[channel,0], transformed=transformed) + legend = axes[channel,0].get_legend() for text, handle in zip(legend.texts, legend.legendHandles): if text.get_text() == "Training Points": handle = plt.Line2D([0], [0], ls='-', color='k', marker='.', ms=10, label='Training Points') @@ -1565,7 +1565,7 @@

    Examples

    Expand source code -Browse git +Browse git
    def aggregate(self, duration, f=np.mean):
         """
    @@ -1602,7 +1602,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def append(self, arg):
         """
    @@ -1639,7 +1639,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def clear_predictions(self):
         """
    @@ -1665,7 +1665,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def copy(self):
         """
    @@ -1700,7 +1700,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def filter(self, start, end):
         """
    @@ -1737,7 +1737,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get(self, index):
         """
    @@ -1789,7 +1789,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_bnse_estimation(self, Q=1, n=1000):
         """
    @@ -1841,7 +1841,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_data(self, transformed=False):
         """
    @@ -1881,7 +1881,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_index(self, index):
         """
    @@ -1923,7 +1923,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_input_dims(self):
         """
    @@ -1966,7 +1966,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_lombscargle_estimation(self, Q=1, n=10000):
         """
    @@ -2012,7 +2012,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_names(self):
         """
    @@ -2044,7 +2044,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_nyquist_estimation(self):
         """
    @@ -2079,7 +2079,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_output_dims(self):
         """
    @@ -2126,7 +2126,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_prediction(self, name, sigma=2.0, transformed=False):
         """
    @@ -2175,7 +2175,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_prediction_x(self):
         """
    @@ -2228,7 +2228,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_sm_estimation(self, Q=1, method='BNSE', optimizer='Adam', iters=100, params={}, plot=False):
         """
    @@ -2284,7 +2284,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_test_data(self, transformed=False):
         """
    @@ -2326,7 +2326,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_train_data(self, transformed=False):
         """
    @@ -2372,7 +2372,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def plot(self, pred=None, title=None, figsize=None, legend=True, transformed=False):
         """
    @@ -2400,8 +2400,8 @@ 

    Examples

    legends = {} for channel in range(self.get_output_dims()): - ax = self.channels[channel].plot(pred=pred, ax=axes[channel,0], transformed=transformed) - legend = ax.get_legend() + self.channels[channel].plot(pred=pred, ax=axes[channel,0], transformed=transformed) + legend = axes[channel,0].get_legend() for text, handle in zip(legend.texts, legend.legendHandles): if text.get_text() == "Training Points": handle = plt.Line2D([0], [0], ls='-', color='k', marker='.', ms=10, label='Training Points') @@ -2445,7 +2445,7 @@

    Examples

    Expand source code -Browse git +Browse git
    def plot_spectrum(self, title=None, method='ls', per=None, maxfreq=None, figsize=None, transformed=False):
         """
    @@ -2501,7 +2501,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def rescale_x(self, upper=1000.0):
         """
    @@ -2540,7 +2540,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def set_prediction_range(self, start, end, n=None, step=None):
         """
    @@ -2601,7 +2601,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def set_prediction_x(self, x):
         """
    @@ -2648,7 +2648,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def transform(self, transformer):
         """
    diff --git a/docs/errors.html b/docs/errors.html
    index e884c60a..6adee94f 100644
    --- a/docs/errors.html
    +++ b/docs/errors.html
    @@ -25,7 +25,7 @@ 

    Module mogptk.errors

    Expand source code -Browse git +Browse git
    import numpy as np
     import pandas as pd
    @@ -173,7 +173,7 @@ 

    Example

    Expand source code -Browse git +Browse git
    def error(*models, X=None, Y=None, per_channel=False, transformed=False, disp=False):
         """
    @@ -266,7 +266,7 @@ 

    Example

    Expand source code -Browse git +Browse git
    def mean_absolute_error(y_true, y_pred):
         """
    @@ -284,7 +284,7 @@ 

    Example

    Expand source code -Browse git +Browse git
    def mean_absolute_percentage_error(y_true, y_pred):
         """
    @@ -304,7 +304,7 @@ 

    Example

    Expand source code -Browse git +Browse git
    def root_mean_squared_error(y_true, y_pred):
         """
    diff --git a/docs/kernels/config.html b/docs/gpr/config.html
    similarity index 60%
    rename from docs/kernels/config.html
    rename to docs/gpr/config.html
    index c91d4f3d..1714b1a9 100644
    --- a/docs/kernels/config.html
    +++ b/docs/gpr/config.html
    @@ -4,7 +4,7 @@
     
     
     
    -mogptk.kernels.config API documentation
    +mogptk.gpr.config API documentation
     
     
     
    @@ -19,13 +19,13 @@
     
    -

    Module mogptk.kernels.config

    +

    Module mogptk.gpr.config

    Expand source code -Browse git +Browse git
    import torch
     
    @@ -39,18 +39,30 @@ 

    Module mogptk.kernels.config

 config = Config()

 def use_single_precision():
+    """
+    Use single precision (float32) for all tensors. This may be much faster on GPUs, but has reduced precision and may more often cause numerical instability.
+    """
     config.dtype = torch.float32

 def use_double_precision():
+    """
+    Use double precision (float64) for all tensors. This is the recommended precision for numerical stability, but can be significantly slower.
+    """
     config.dtype = torch.float64

 def use_cpu(n=None):
+    """
+    Use the CPU instead of the GPU for tensor calculations. This is the default if no GPU is available. If you have more than one CPU, you can use a specific CPU by setting `n`.
+    """
     if n is None:
         config.device = torch.device('cpu')
     else:
         config.device = torch.device('cpu', n)

 def use_gpu(n=None):
+    """
+    Use the GPU instead of the CPU for tensor calculations. This is the default if a GPU is available. If you have more than one GPU, you can use a specific GPU by setting `n`.
+    """
     if not torch.cuda.is_available():
         logger.error("CUDA is not available")
     elif n is not None and (not isinstance(n, int) or n < 0 or torch.cuda.device_count() <= n):
@@ -61,6 +73,9 @@

    Module mogptk.kernels.config

         config.device = torch.device('cuda', n)

 def print_gpu_information():
+    """
+    Print information about whether CUDA is supported, and if so which GPU is being used.
+    """
     if not torch.cuda.is_available():
         print("CUDA is not available")
         return
@@ -73,6 +88,9 @@

    Module mogptk.kernels.config

    print("%2d %s%s" % (n, torch.cuda.get_device_name(n), " (selected)" if n == current else "")) def set_positive_minimum(val): + """ + Set the positive minimum for kernel parameters. This is usually slightly larger than zero to avoid numerical instabilities. Default is at 1e-8. + """ config.positive_minimum = val
    @@ -83,17 +101,20 @@

    Module mogptk.kernels.config

    Functions

    -
    +
    def print_gpu_information()
    -
    +

    Print information about whether CUDA is supported, and if so which GPU is being used.

    Expand source code -Browse git +Browse git
    def print_gpu_information():
    +    """
    +    Print information about whether CUDA is supported, and if so which GPU is being used.
    +    """
         if not torch.cuda.is_available():
             print("CUDA is not available")
             return
    @@ -106,62 +127,74 @@ 

    Functions

    print("%2d %s%s" % (n, torch.cuda.get_device_name(n), " (selected)" if n == current else ""))
    -
    +
    def set_positive_minimum(val)
    -
    +

    Set the positive minimum for kernel parameters. This is usually slightly larger than zero to avoid numerical instabilities. Default is at 1e-8.

    Expand source code -Browse git +Browse git
    def set_positive_minimum(val):
    +    """
    +    Set the positive minimum for kernel parameters. This is usually slightly larger than zero to avoid numerical instabilities. Default is at 1e-8.
    +    """
         config.positive_minimum = val
    -
    +
    def use_cpu(n=None)
    -
    +

    Use the CPU instead of the GPU for tensor calculations. This is the default if no GPU is available. If you have more than one CPU, you can use a specific CPU by setting n.

    Expand source code -Browse git +Browse git
    def use_cpu(n=None):
    +    """
    +    Use the CPU instead of the GPU for tensor calculations. This is the default if no GPU is available. If you have more than one CPU, you can use a specific CPU by setting `n`.
    +    """
         if n is None:
             config.device = torch.device('cpu')
         else:
             config.device = torch.device('cpu', n)
    -
    +
    def use_double_precision()
    -
    +

    Use double precision (float64) for all tensors. This is the recommended precision for numerical stability, but can be significantly slower.

    Expand source code -Browse git +Browse git
    def use_double_precision():
    +    """
    +    Use double precision (float64) for all tensors. This is the recommended precision for numerical stability, but can be significantly slower.
    +    """
         config.dtype = torch.float64
    -
    +
    def use_gpu(n=None)
    -
    +

    Use the GPU instead of the CPU for tensor calculations. This is the default if a GPU is available. If you have more than one GPU, you can use a specific GPU by setting n.

    Expand source code -Browse git +Browse git
    def use_gpu(n=None):
    +    """
    +    Use the GPU instead of the CPU for tensor calculations. This is the default if a GPU is available. If you have more than one GPU, you can use a specific GPU by setting `n`.
    +    """
         if not torch.cuda.is_available():
             logger.error("CUDA is not available")
         elif n is not None and (not isinstance(n, int) or n < 0 or torch.cuda.device_count() <= n):
    @@ -172,17 +205,20 @@ 

    Functions

    config.device = torch.device('cuda', n)
    -
    +
    def use_single_precision()
    -
    +

    Use single precision (float32) for all tensors. This may be much faster on GPUs, but has reduced precision and may more often cause numerical instability.

    Expand source code -Browse git +Browse git
    def use_single_precision():
    +    """
    +    Use single precision (float32) for all tensors. This may be much faster on GPUs, but has reduced precision and may more often cause numerical instability.
    +    """
         config.dtype = torch.float32
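A brief usage sketch of these configuration helpers (assuming they are re-exported at the package level as `mogptk.gpr.*`, as the `from .config import *` in the package index suggests); they should be called before constructing any model so that all tensors are created with the chosen device and precision.

>>> import mogptk
>>> mogptk.gpr.print_gpu_information()   # show whether CUDA is available and which GPU is selected
>>> mogptk.gpr.use_gpu(0)                # run tensor calculations on the first GPU
>>> mogptk.gpr.use_single_precision()    # float32: faster on GPUs, less numerically stable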
    @@ -191,7 +227,7 @@

    Functions

    Classes

    -
    +
    class Config
    @@ -199,7 +235,7 @@

    Classes

    Expand source code -Browse git +Browse git
    class Config:
         dtype = torch.float64
    @@ -211,15 +247,15 @@ 

    Classes

    Class variables

    -
    var device
    +
    var device
    -
    var dtype
    +
    var dtype
    -
    var positive_minimum
    +
    var positive_minimum

    Convert a string or number to a floating point number, if possible.

    @@ -236,27 +272,27 @@

    Index

    • Super-module

    • Functions

    • Classes

      diff --git a/docs/gpr/distribution.html b/docs/gpr/distribution.html new file mode 100644 index 00000000..3be9a90f --- /dev/null +++ b/docs/gpr/distribution.html @@ -0,0 +1,185 @@ + + + + + + +mogptk.gpr.distribution API documentation + + + + + + + + + + + +
      +
      +
      +

      Module mogptk.gpr.distribution

      +
      +
      +
      + +Expand source code +Browse git + +
      import torch
      +from . import config
      +
      +class Distribution:
      +    def log_p(self, x):
      +        raise NotImplementedError()
      +
      +class Gaussian(Distribution):
      +    def __init__(self, mu=torch.tensor(0.0), sigma=torch.tensor(1.0)):
      +        if mu.shape != sigma.shape:
      +            raise ValueError("mu and sigma need to have the same shape")
      +
      +        self.mu = mu.to(config.device, config.dtype)
      +        self.sigma = sigma.to(config.device, config.dtype)
      +        self.log_p_constant = -0.5*torch.log(2.0*np.pi*sigma)
      +
      +    def log_p(self, x):
      +        if x.shape != self.mu.shape:
      +            raise ValueError("x must match shape of mu and sigma")
      +        return self.log_p_constant - 0.5*(x-self.mu).T.mm(self.sigma.mm(x-self.mu))
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +

      Classes

      +
      +
      +class Distribution +
      +
      +
      +
      + +Expand source code +Browse git + +
      class Distribution:
      +    def log_p(self, x):
      +        raise NotImplementedError()
      +
      +

      Subclasses

      + +

      Methods

      +
      +
      +def log_p(self, x) +
      +
      +
      +
      + +Expand source code +Browse git + +
      def log_p(self, x):
      +    raise NotImplementedError()
      +
      +
      +
      +
      +
      +class Gaussian +(mu=tensor(0.), sigma=tensor(1.)) +
      +
      +
      +
      + +Expand source code +Browse git + +
      class Gaussian(Distribution):
      +    def __init__(self, mu=torch.tensor(0.0), sigma=torch.tensor(1.0)):
      +        if mu.shape != sigma.shape:
      +            raise ValueError("mu and sigma need to have the same shape")
      +
      +        self.mu = mu.to(config.device, config.dtype)
      +        self.sigma = sigma.to(config.device, config.dtype)
      +        self.log_p_constant = -0.5*torch.log(2.0*np.pi*sigma)
      +
      +    def log_p(self, x):
      +        if x.shape != self.mu.shape:
      +            raise ValueError("x must match shape of mu and sigma")
      +        return self.log_p_constant - 0.5*(x-self.mu).T.mm(self.sigma.mm(x-self.mu))
      +
      +

      Ancestors

      + +

      Methods

      +
      +
      +def log_p(self, x) +
      +
      +
      +
      + +Expand source code +Browse git + +
      def log_p(self, x):
      +    if x.shape != self.mu.shape:
      +        raise ValueError("x must match shape of mu and sigma")
      +    return self.log_p_constant - 0.5*(x-self.mu).T.mm(self.sigma.mm(x-self.mu))
      +
      +
      +
      +
      +
      +
      +
      + +
      + + + + + \ No newline at end of file diff --git a/docs/kernels/index.html b/docs/gpr/index.html similarity index 74% rename from docs/kernels/index.html rename to docs/gpr/index.html index 188ddd52..e0d6d1d6 100644 --- a/docs/kernels/index.html +++ b/docs/gpr/index.html @@ -4,7 +4,7 @@ -mogptk.kernels API documentation +mogptk.gpr API documentation @@ -19,13 +19,13 @@
      -

      Module mogptk.kernels

      +

      Module mogptk.gpr

      Expand source code -Browse git +Browse git
      from .config import *
       from .parameter import *
      @@ -39,35 +39,39 @@ 

      Module mogptk.kernels

      Sub-modules

      -
      mogptk.kernels.config
      +
      mogptk.gpr.config
      -
      mogptk.kernels.kernel
      +
      mogptk.gpr.distribution
      -
      mogptk.kernels.mean
      +
      mogptk.gpr.kernel
      -
      mogptk.kernels.model
      +
      mogptk.gpr.mean
      -
      mogptk.kernels.multioutput
      +
      mogptk.gpr.model
      -
      mogptk.kernels.parameter
      +
      mogptk.gpr.multioutput
      -
      mogptk.kernels.singleoutput
      +
      mogptk.gpr.parameter
      -
      mogptk.kernels.util
      +
      mogptk.gpr.singleoutput
      +
      +
      +
      +
      mogptk.gpr.util
      @@ -93,14 +97,15 @@

      Index

    • Sub-modules

    diff --git a/docs/kernels/kernel.html b/docs/gpr/kernel.html similarity index 77% rename from docs/kernels/kernel.html rename to docs/gpr/kernel.html index 25346d9e..4139c33e 100644 --- a/docs/kernels/kernel.html +++ b/docs/gpr/kernel.html @@ -4,7 +4,7 @@ -mogptk.kernels.kernel API documentation +mogptk.gpr.kernel API documentation @@ -19,13 +19,13 @@
    -

    Module mogptk.kernels.kernel

    +

    Module mogptk.gpr.kernel

    Expand source code -Browse git +Browse git
    import torch
     import copy
    @@ -229,7 +229,7 @@ 

    Module mogptk.kernels.kernel

    Classes

    -
    +
    class AddKernel (*kernels, name='Add')
    @@ -238,7 +238,7 @@

    Classes

    Expand source code -Browse git +Browse git
    class AddKernel(Kernel):
         def __init__(self, *kernels, name="Add"):
    @@ -253,15 +253,15 @@ 

    Classes

    Ancestors

    Subclasses

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -269,7 +269,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         return torch.stack([kernel(X1,X2) for kernel in self.kernels], dim=2).sum(dim=2)
    @@ -277,7 +277,7 @@

    Methods

    -
    +
    class AutomaticRelevanceDeterminationKernel (kernel, input_dims, name='ARD')
    @@ -286,7 +286,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class AutomaticRelevanceDeterminationKernel(MulKernel):
         def __init__(self, kernel, input_dims, name="ARD"):
    @@ -297,11 +297,11 @@ 

    Methods

    Ancestors

    -
    +
    class Kernel (input_dims=None, active_dims=None, name=None)
    @@ -310,7 +310,7 @@

    Ancestors

    Expand source code -Browse git +Browse git
    class Kernel:
         def __init__(self, input_dims=None, active_dims=None, name=None):
    @@ -412,26 +412,27 @@ 

    Ancestors

    Subclasses

    Instance variables

    -
    var active_dims
    +
    var active_dims
    Expand source code -Browse git +Browse git
    @property
     def active_dims(self):
    @@ -441,7 +442,7 @@ 

    Instance variables

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -449,13 +450,13 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         raise NotImplementedError()
    -
    +
    def distance(self, X1, X2=None)
    @@ -463,7 +464,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def distance(self, X1, X2=None):
         # X1 is NxD, X2 is MxD, then ret is NxMxD
    @@ -472,7 +473,7 @@ 

    Methods

    return X1.unsqueeze(1) - X2
    -
    +
    def squared_distance(self, X1, X2=None)
    @@ -480,7 +481,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def squared_distance(self, X1, X2=None):
         # X1 is NxD, X2 is MxD, then ret is NxMxD
    @@ -492,7 +493,7 @@ 

    Methods

    -
    +
    class MixtureKernel (kernel, Q, name='Mixture')
    @@ -501,7 +502,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class MixtureKernel(AddKernel):
         def __init__(self, kernel, Q, name="Mixture"):
    @@ -510,11 +511,11 @@ 

    Methods

    Ancestors

    -
    +
    class MulKernel (*kernels, name='Mul')
    @@ -523,7 +524,7 @@

    Ancestors

    Expand source code -Browse git +Browse git
    class MulKernel(Kernel):
         def __init__(self, *kernels, name="Mul"):
    @@ -538,15 +539,15 @@ 

    Ancestors

    Ancestors

    Subclasses

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -554,7 +555,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         return torch.stack([kernel(X1,X2) for kernel in self.kernels], dim=2).prod(dim=2)
    @@ -562,7 +563,7 @@

    Methods

    -
    +
    class MultiOutputKernel (output_dims, input_dims=None, active_dims=None, name=None)
    @@ -571,7 +572,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class MultiOutputKernel(Kernel):
         # The MultiOutputKernel is a base class for multi output kernels. It assumes that the first dimension of X contains channel IDs (integers) and calculate the final kernel matrix accordingly. Concretely, it will call the Ksub method for derived kernels from this class, which should return the kernel matrix between channel i and j, given inputs X1 and X2. This class will automatically split and recombine the input vectors and kernel matrices respectively, in order to create the final kernel matrix of the multi output kernel.
    @@ -631,19 +632,19 @@ 

    Methods

    Ancestors

    Subclasses

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -651,7 +652,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,1+input_dims) where the first column is the channel ID
    @@ -695,7 +696,7 @@ 

    Methods

    return res
    -
    +
    def Ksub(self, i, j, X1, X2=None)
    @@ -703,7 +704,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def Ksub(self, i, j, X1, X2=None):
         raise NotImplementedError()
    @@ -722,43 +723,43 @@

    Index

    • Super-module

    • Classes

      diff --git a/docs/kernels/mean.html b/docs/gpr/mean.html similarity index 91% rename from docs/kernels/mean.html rename to docs/gpr/mean.html index 57477fa5..dfb702b1 100644 --- a/docs/kernels/mean.html +++ b/docs/gpr/mean.html @@ -4,7 +4,7 @@ -mogptk.kernels.mean API documentation +mogptk.gpr.mean API documentation @@ -19,13 +19,13 @@
      -

      Module mogptk.kernels.mean

      +

      Module mogptk.gpr.mean

      Expand source code -Browse git +Browse git
      from . import Parameter
       
      @@ -62,7 +62,7 @@ 

      Module mogptk.kernels.mean

      Classes

      -
      +
      class Mean (name=None)
      @@ -71,7 +71,7 @@

      Classes

      Expand source code -Browse git +Browse git
      class Mean:
           def __init__(self, name=None):
      @@ -108,13 +108,13 @@ 

      Index

      • Super-module

      • Classes

      • diff --git a/docs/kernels/model.html b/docs/gpr/model.html similarity index 83% rename from docs/kernels/model.html rename to docs/gpr/model.html index 99101788..b63a41cc 100644 --- a/docs/kernels/model.html +++ b/docs/gpr/model.html @@ -4,7 +4,7 @@ -mogptk.kernels.model API documentation +mogptk.gpr.model API documentation @@ -19,13 +19,13 @@
        -

        Module mogptk.kernels.model

        +

        Module mogptk.gpr.model

        Expand source code -Browse git +Browse git
        import torch
         import numpy as np
        @@ -38,11 +38,11 @@ 

        Module mogptk.kernels.model

        class Model: def __init__(self, kernel, X, y, mean=None, name=None): if not issubclass(type(kernel), Kernel): - raise ValueError("kernel must derive from mogptk.kernels.Kernel") + raise ValueError("kernel must derive from mogptk.gpr.Kernel") X, y = self._check_input(X, y) if mean is not None: if not issubclass(type(mean), Mean): - raise ValueError("mean must derive from mogptk.kernels.Mean") + raise ValueError("mean must derive from mogptk.gpr.Mean") mu = mean(X).reshape(-1,1) if mu.shape != y.shape: raise ValueError("mean and y data must match shapes: %s != %s" % (mu.shape, y.shape)) @@ -152,7 +152,7 @@

        Module mogptk.kernels.model

        except Exception as e: vals = [["Name", "Range", "Value"]] for name, p in zip(self._param_names, self._params): - vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.constrained.detach().numy())]) + vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.constrained.detach().numpy())]) nameWidth = max([len(val[0]) for val in vals]) rangeWidth = max([len(val[1]) for val in vals]) @@ -197,11 +197,11 @@

        Module mogptk.kernels.model

        if n is None: S = 1 - mu, var = self.predict(Z, full_var=True) # MxD and MxMxD + mu, var = self.predict(Z, full=True, numpy=False) # MxD and MxMxD u = torch.normal(torch.zeros(Z.shape[0], S, device=config.device, dtype=config.dtype), torch.tensor(1.0, device=config.device, dtype=config.dtype)) # MxS L = torch.cholesky(var + 1e-6*torch.ones(Z.shape[0]).diagflat()) # MxM samples = mu + L.mm(u) # MxS - if num is None: + if n is None: samples = samples.squeeze() return samples.detach().numpy() @@ -228,7 +228,7 @@

        Module mogptk.kernels.model

        p -= self.log_marginal_likelihood_constant return p#/self.X.shape[0] # dividing by the number of data points normalizes the learning rate - def predict(self, Z): + def predict(self, Z, full=False, numpy=True): with torch.no_grad(): Z = self._check_input(Z) # MxD @@ -247,8 +247,12 @@

        Module mogptk.kernels.model

        mu = Ks.T.mm(torch.cholesky_solve(self.y,L)) # Mx1 var = Kss - v.T.mm(v) # MxM - var = var.diag().reshape(-1,1) # Mx1 - return mu.detach().numpy(), var.detach().numpy()
        + if not full: + var = var.diag().reshape(-1,1) # Mx1 + if numpy: + return mu.detach().numpy(), var.detach().numpy() + else: + return mu.detach(), var.detach()
      @@ -260,7 +264,7 @@

      Module mogptk.kernels.model

      Classes

      -
      +
      class CholeskyException (...)
      @@ -269,7 +273,7 @@

      Classes

      Expand source code -Browse git +Browse git
      class CholeskyException(Exception):
           pass
      @@ -280,7 +284,7 @@

      Ancestors

    • builtins.BaseException
    -
    +
    class GPR (kernel, X, y, noise=1.0, mean=None, name='GPR')
    @@ -289,7 +293,7 @@

    Ancestors

    Expand source code -Browse git +Browse git
    class GPR(Model):
         def __init__(self, kernel, X, y, noise=1.0, mean=None, name="GPR"):
    @@ -314,7 +318,7 @@ 

    Ancestors

    p -= self.log_marginal_likelihood_constant return p#/self.X.shape[0] # dividing by the number of data points normalizes the learning rate - def predict(self, Z): + def predict(self, Z, full=False, numpy=True): with torch.no_grad(): Z = self._check_input(Z) # MxD @@ -333,16 +337,20 @@

    Ancestors

    mu = Ks.T.mm(torch.cholesky_solve(self.y,L)) # Mx1 var = Kss - v.T.mm(v) # MxM - var = var.diag().reshape(-1,1) # Mx1 - return mu.detach().numpy(), var.detach().numpy()
    + if not full: + var = var.diag().reshape(-1,1) # Mx1 + if numpy: + return mu.detach().numpy(), var.detach().numpy() + else: + return mu.detach(), var.detach()

    Ancestors

    Methods

    -
    +
    def log_marginal_likelihood(self)
    @@ -350,7 +358,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def log_marginal_likelihood(self):
         K = self.kernel(self.X) + self.noise()*torch.eye(self.X.shape[0], device=config.device, dtype=config.dtype)  # NxN
    @@ -367,17 +375,17 @@ 

    Methods

    return p#/self.X.shape[0] # dividing by the number of data points normalizes the learning rate
    -
    -def predict(self, Z) +
    +def predict(self, Z, full=False, numpy=True)
    Expand source code -Browse git +Browse git -
    def predict(self, Z):
    +
    def predict(self, Z, full=False, numpy=True):
         with torch.no_grad():
             Z = self._check_input(Z)  # MxD
     
    @@ -396,13 +404,17 @@ 

    Methods

         mu = Ks.T.mm(torch.cholesky_solve(self.y,L))  # Mx1
         var = Kss - v.T.mm(v)  # MxM
-        var = var.diag().reshape(-1,1)  # Mx1
-        return mu.detach().numpy(), var.detach().numpy()
+        if not full:
+            var = var.diag().reshape(-1,1)  # Mx1
+        if numpy:
+            return mu.detach().numpy(), var.detach().numpy()
+        else:
+            return mu.detach(), var.detach()
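To illustrate the extended signature (a sketch, assuming a trained low-level GPR model `gpr_model` and prediction inputs `Z`): the default behaviour is unchanged and returns the posterior mean and per-point variance as NumPy arrays, while `full=True` keeps the full covariance matrix and `numpy=False` returns torch tensors, which is what `sample()` relies on internally.

>>> mu, var = gpr_model.predict(Z)                          # NumPy arrays: Mx1 mean, Mx1 variances
>>> mu, cov = gpr_model.predict(Z, full=True, numpy=False)  # torch tensors: Mx1 mean, MxM covariance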
    -
    +
    class Model (kernel, X, y, mean=None, name=None)
    @@ -411,16 +423,16 @@

    Methods

    Expand source code -Browse git +Browse git
    class Model:
         def __init__(self, kernel, X, y, mean=None, name=None):
             if not issubclass(type(kernel), Kernel):
    -            raise ValueError("kernel must derive from mogptk.kernels.Kernel")
    +            raise ValueError("kernel must derive from mogptk.gpr.Kernel")
             X, y = self._check_input(X, y)
             if mean is not None:
                 if not issubclass(type(mean), Mean):
    -                raise ValueError("mean must derive from mogptk.kernels.Mean")
    +                raise ValueError("mean must derive from mogptk.gpr.Mean")
                 mu = mean(X).reshape(-1,1)
                 if mu.shape != y.shape:
                     raise ValueError("mean and y data must match shapes: %s != %s" % (mu.shape, y.shape))
    @@ -530,7 +542,7 @@ 

    Methods

    except Exception as e: vals = [["Name", "Range", "Value"]] for name, p in zip(self._param_names, self._params): - vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.constrained.detach().numy())]) + vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.constrained.detach().numpy())]) nameWidth = max([len(val[0]) for val in vals]) rangeWidth = max([len(val[1]) for val in vals]) @@ -575,21 +587,21 @@

    Methods

    if n is None: S = 1 - mu, var = self.predict(Z, full_var=True) # MxD and MxMxD + mu, var = self.predict(Z, full=True, numpy=False) # MxD and MxMxD u = torch.normal(torch.zeros(Z.shape[0], S, device=config.device, dtype=config.dtype), torch.tensor(1.0, device=config.device, dtype=config.dtype)) # MxS L = torch.cholesky(var + 1e-6*torch.ones(Z.shape[0]).diagflat()) # MxM samples = mu + L.mm(u) # MxS - if num is None: + if n is None: samples = samples.squeeze() return samples.detach().numpy()

    Subclasses

    Methods

    -
    +
    def K(self, Z)
    @@ -597,7 +609,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, Z):
         with torch.no_grad():
    @@ -605,7 +617,7 @@ 

    Methods

    return self.kernel(Z).detach().numpy()
    -
    +
    def log_marginal_likelihood(self)
    @@ -613,13 +625,13 @@

    Methods

    Expand source code -Browse git +Browse git
    def log_marginal_likelihood(self):
         raise NotImplementedError()
    -
    +
    def log_prior(self)
    @@ -627,13 +639,13 @@

    Methods

    Expand source code -Browse git +Browse git
    def log_prior(self):
         return sum([p.log_prior() for p in self._params])
    -
    +
    def loss(self)
    @@ -641,7 +653,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def loss(self):
         self.zero_grad()
    @@ -650,7 +662,7 @@ 

    Methods

    return loss
    -
    +
    def parameters(self)
    @@ -658,7 +670,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def parameters(self):
         for p in self._params:
    @@ -666,7 +678,7 @@ 

    Methods

    yield p.unconstrained
    -
    +
    def print_parameters(self)
    @@ -674,7 +686,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def print_parameters(self):
         def param_range(lower, upper, trainable=True):
    @@ -698,7 +710,7 @@ 

    Methods

    except Exception as e: vals = [["Name", "Range", "Value"]] for name, p in zip(self._param_names, self._params): - vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.constrained.detach().numy())]) + vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.constrained.detach().numpy())]) nameWidth = max([len(val[0]) for val in vals]) rangeWidth = max([len(val[1]) for val in vals]) @@ -706,7 +718,7 @@

    Methods

    print("%-*s %-*s %s" % (nameWidth, val[0], rangeWidth, val[1], val[2]))
    -
    +
    def sample(self, Z, n=None)
    @@ -714,7 +726,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def sample(self, Z, n=None):
         with torch.no_grad():
    @@ -722,16 +734,16 @@ 

    Methods

         if n is None:
             S = 1
-        mu, var = self.predict(Z, full_var=True)  # MxD and MxMxD
+        mu, var = self.predict(Z, full=True, numpy=False)  # MxD and MxMxD
         u = torch.normal(torch.zeros(Z.shape[0], S, device=config.device, dtype=config.dtype), torch.tensor(1.0, device=config.device, dtype=config.dtype))  # MxS
         L = torch.cholesky(var + 1e-6*torch.ones(Z.shape[0]).diagflat())  # MxM
         samples = mu + L.mm(u)  # MxS
-        if num is None:
+        if n is None:
             samples = samples.squeeze()
         return samples.detach().numpy()
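A short usage sketch of `sample()` after the fixes above (the `num` → `n` rename and the call into the new `predict(Z, full=True, numpy=False)`); `model` is assumed to be a trained GPR instance and `Z` the inputs to sample at.

>>> samples = model.sample(Z)        # a single posterior draw, shape (M,)
>>> samples = model.sample(Z, n=10)  # ten posterior draws, shape (M, 10)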
    -
    +
    def zero_grad(self)
    @@ -739,7 +751,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def zero_grad(self):
         for p in self._params:
    @@ -765,32 +777,32 @@ 

    Index

    -
    +
    class GaussianConvolutionProcessKernel (output_dims, input_dims, active_dims=None, name='CONV')
    @@ -296,7 +296,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class GaussianConvolutionProcessKernel(MultiOutputKernel):
         def __init__(self, output_dims, input_dims, active_dims=None, name="CONV"):
    @@ -330,12 +330,12 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def Ksub(self, i, j, X1, X2=None)
    @@ -343,7 +343,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def Ksub(self, i, j, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -365,7 +365,7 @@ 

    Methods

    -
    +
    class IndependentMultiOutputKernel (*kernels, output_dims=None, name='IMO')
    @@ -374,7 +374,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class IndependentMultiOutputKernel(MultiOutputKernel):
         def __init__(self, *kernels, output_dims=None, name="IMO"):
    @@ -397,12 +397,12 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def Ksub(self, i, j, X1, X2=None)
    @@ -410,7 +410,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def Ksub(self, i, j, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -424,7 +424,7 @@ 

    Methods

    -
    +
    class LinearModelOfCoregionalizationKernel (*kernels, output_dims, input_dims, Q=None, Rq=1, name='LMC')
    @@ -433,7 +433,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class LinearModelOfCoregionalizationKernel(MultiOutputKernel):
         def __init__(self, *kernels, output_dims, input_dims, Q=None, Rq=1, name="LMC"):
    @@ -458,12 +458,12 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def Ksub(self, i, j, X1, X2=None)
    @@ -471,7 +471,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def Ksub(self, i, j, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -482,7 +482,7 @@ 

    Methods

    -
    +
    class MultiOutputSpectralKernel (output_dims, input_dims, active_dims=None, name='MOSM')
    @@ -491,7 +491,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class MultiOutputSpectralKernel(MultiOutputKernel):
         def __init__(self, output_dims, input_dims, active_dims=None, name="MOSM"):
    @@ -542,12 +542,12 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def Ksub(self, i, j, X1, X2=None)
    @@ -555,7 +555,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def Ksub(self, i, j, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -596,39 +596,39 @@ 

    Index

    -
    +
    class Sigmoid (lower=0.0, upper=1.0)
    @@ -411,7 +411,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class Sigmoid(Transform):
         def __init__(self, lower=0.0, upper=1.0):
    @@ -429,11 +429,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def forward(self, x)
    @@ -441,13 +441,13 @@

    Methods

    Expand source code -Browse git +Browse git
    def forward(self, x):
         return self.lower + (self.upper-self.lower)*torch.sigmoid(x)
    -
    +
    def inverse(self, y)
    @@ -455,7 +455,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def inverse(self, y):
         if torch.any(y < self.lower) or torch.any(self.upper < y):
    @@ -466,7 +466,7 @@ 

    Methods

    -
    +
    class Softplus (lower=0.0, beta=0.1, threshold=20.0)
    @@ -475,7 +475,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class Softplus(Transform):
         def __init__(self, lower=0.0, beta=0.1, threshold=20.0):
    @@ -493,11 +493,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def forward(self, x)
    @@ -505,13 +505,13 @@

    Methods

    Expand source code -Browse git +Browse git
    def forward(self, x):
         return self.lower + functional.softplus(x, beta=self.beta, threshold=self.threshold)
    -
    +
    def inverse(self, y)
    @@ -519,7 +519,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def inverse(self, y):
         if torch.any(y < self.lower):
    @@ -529,7 +529,7 @@ 

    Methods

    -
    +
    class Transform
    @@ -537,7 +537,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class Transform:
         def forward(self, x):
    @@ -550,12 +550,12 @@ 

    Methods

    Subclasses

    Methods

    -
    +
    def forward(self, x)
    @@ -563,14 +563,14 @@

    Methods

    Expand source code -Browse git +Browse git
    def forward(self, x):
         # unconstrained to constrained space
         raise NotImplementedError()
    -
    +
    def inverse(self, y)
    @@ -578,7 +578,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def inverse(self, y):
         # constrained to unconstrained space
    @@ -598,39 +598,39 @@ 

    Index

    • Super-module

    • Classes

      diff --git a/docs/kernels/singleoutput.html b/docs/gpr/singleoutput.html similarity index 67% rename from docs/kernels/singleoutput.html rename to docs/gpr/singleoutput.html index 54ac944f..4c576ad5 100644 --- a/docs/kernels/singleoutput.html +++ b/docs/gpr/singleoutput.html @@ -4,7 +4,7 @@ -mogptk.kernels.singleoutput API documentation +mogptk.gpr.singleoutput API documentation @@ -19,13 +19,13 @@
      -

      Module mogptk.kernels.singleoutput

      +

      Module mogptk.gpr.singleoutput

      Expand source code -Browse git +Browse git
      import torch
       import numpy as np
      @@ -160,7 +160,36 @@ 

      Module mogptk.kernels.singleoutput

         tau = self.distance(X1,X2)  # NxMxD
         exp = torch.exp(-2.0*np.pi**2 * tau**2 * self.variance().reshape(1,1,-1))  # NxMxD
         cos = torch.cos(2.0*np.pi * tau * self.mean().reshape(1,1,-1))  # NxMxD
-        return self.weight() * torch.prod(exp * cos, dim=2)
+        return self.weight() * torch.prod(exp * cos, dim=2)
+
+class MaternKernel(Kernel):
+    def __init__(self, nu=0.5, input_dims=None, active_dims=None, name="Matérn"):
+        super(MaternKernel, self).__init__(input_dims, active_dims, name)
+
+        if nu not in [0.5, 1.5, 2.5]:
+            raise ValueError("nu parameter must be 0.5, 1.5, or 2.5")
+
+        l = torch.rand(input_dims)
+        sigma = torch.rand(1)
+
+        self.nu = nu
+        self.l = Parameter(l, lower=1e-6)
+        self.sigma = Parameter(sigma, lower=1e-6)
+
+    def K(self, X1, X2=None):
+        # X has shape (data_points,input_dims)
+        X1,X2 = self._check_input(X1,X2)
+        if X2 is None:
+            X2 = X1
+
+        dist = torch.abs(torch.tensordot(self.distance(X1,X2), 1.0/self.l(), dims=1))
+        if self.nu == 0.5:
+            constant = 1.0
+        elif self.nu == 1.5:
+            constant = 1.0 + np.sqrt(3.0)*dist
+        elif self.nu == 2.5:
+            constant = 1.0 + np.sqrt(5.0)*dist + 5.0/3.0*dist**2
+        return self.sigma()**2 * constant * torch.exp(-np.sqrt(self.nu*2.0)*dist)
    @@ -172,7 +201,7 @@

    Module mogptk.kernels.singleoutput

    Classes

    -
    +
    class LinearKernel (input_dims=None, active_dims=None, name='Linear')
    @@ -181,7 +210,7 @@

    Classes

    Expand source code -Browse git +Browse git
    class LinearKernel(Kernel):
         def __init__(self, input_dims=None, active_dims=None, name="Linear"):
    @@ -201,11 +230,11 @@ 

    Classes

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -213,7 +242,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -226,7 +255,81 @@ 

    Methods

    -
    +
    +class MaternKernel +(nu=0.5, input_dims=None, active_dims=None, name='Matérn') +
    +
    +
    +
    + +Expand source code +Browse git + +
    class MaternKernel(Kernel):
    +    def __init__(self, nu=0.5, input_dims=None, active_dims=None, name="Matérn"):
    +        super(MaternKernel, self).__init__(input_dims, active_dims, name)
    +
    +        if nu not in [0.5, 1.5, 2.5]:
    +            raise ValueError("nu parameter must be 0.5, 1.5, or 2.5")
    +
    +        l = torch.rand(input_dims)
    +        sigma = torch.rand(1)
    +
    +        self.nu = nu
    +        self.l = Parameter(l, lower=1e-6)
    +        self.sigma = Parameter(sigma, lower=1e-6)
    +
    +    def K(self, X1, X2=None):
    +        # X has shape (data_points,input_dims)
    +        X1,X2 = self._check_input(X1,X2)
    +        if X2 is None:
    +            X2 = X1
    +
    +        dist = torch.abs(torch.tensordot(self.distance(X1,X2), 1.0/self.l(), dims=1))
    +        if self.nu == 0.5:
    +            constant = 1.0
    +        elif self.nu == 1.5:
    +            constant = 1.0 + np.sqrt(3.0)*dist
    +        elif self.nu == 2.5:
    +            constant = 1.0 + np.sqrt(5.0)*dist + 5.0/3.0*dist**2
    +        return self.sigma()**2 * constant * torch.exp(-np.sqrt(self.nu*2.0)*dist)
    +
    +
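As a usage sketch for the new kernel (not taken from the MOGPTK documentation): the import path mogptk.gpr.MaternKernel and the torch-tensor input below are assumptions; the class itself, as shown above, expects X of shape (data_points, input_dims) and nu in {0.5, 1.5, 2.5}.

import torch
import mogptk

# Matérn-3/2 kernel over a single input dimension (assumed to be re-exported by mogptk.gpr).
kernel = mogptk.gpr.MaternKernel(nu=1.5, input_dims=1)

X = torch.linspace(0.0, 1.0, 10).reshape(-1, 1)   # shape (data_points, input_dims)
K = kernel.K(X)                                   # 10x10 covariance, returned as a torch tensor
print(K.shape)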

    Ancestors

    + +

    Methods

    +
    +
    +def K(self, X1, X2=None) +
    +
    +
    +
    + +Expand source code +Browse git + +
    def K(self, X1, X2=None):
    +    # X has shape (data_points,input_dims)
    +    X1,X2 = self._check_input(X1,X2)
    +    if X2 is None:
    +        X2 = X1
    +
    +    dist = torch.abs(torch.tensordot(self.distance(X1,X2), 1.0/self.l(), dims=1))
    +    if self.nu == 0.5:
    +        constant = 1.0
    +    elif self.nu == 1.5:
    +        constant = 1.0 + np.sqrt(3.0)*dist
    +    elif self.nu == 2.5:
    +        constant = 1.0 + np.sqrt(5.0)*dist + 5.0/3.0*dist**2
    +    return self.sigma()**2 * constant * torch.exp(-np.sqrt(self.nu*2.0)*dist)
    +
    +
    +
    +
    +
    class PeriodicKernel (input_dims, active_dims=None, name='Periodic')
    @@ -235,7 +338,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class PeriodicKernel(Kernel):
         def __init__(self, input_dims, active_dims=None, name="Periodic"):
    @@ -259,11 +362,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -271,7 +374,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -284,7 +387,7 @@ 

    Methods

    -
    +
    class PhiKernel (phi, input_dims, active_dims=None, name='Phi')
    @@ -293,7 +396,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class PhiKernel(Kernel):
         def __init__(self, phi, input_dims, active_dims=None, name="Phi"):
    @@ -317,11 +420,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -329,7 +432,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -344,7 +447,7 @@ 

    Methods

    -
    +
    class PolynomialKernel (degree, input_dims=None, active_dims=None, name='Polynomial')
    @@ -353,7 +456,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class PolynomialKernel(Kernel):
         def __init__(self, degree, input_dims=None, active_dims=None, name="Polynomial"):
    @@ -374,11 +477,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -386,7 +489,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -399,7 +502,7 @@ 

    Methods

    -
    +
    class RationalQuadraticKernel (alpha, input_dims, active_dims=None, name='RQ')
    @@ -408,7 +511,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class RationalQuadraticKernel(Kernel):
         def __init__(self, alpha, input_dims, active_dims=None, name="RQ"):
    @@ -431,11 +534,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -443,7 +546,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -456,7 +559,7 @@ 

    Methods

    -
    +
    class SpectralKernel (input_dims, active_dims=None, name='SM')
    @@ -465,7 +568,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class SpectralKernel(Kernel):
         def __init__(self, input_dims, active_dims=None, name="SM"):
    @@ -490,11 +593,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -502,7 +605,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -516,7 +619,7 @@ 

    Methods

    -
    +
    class SquaredExponentialKernel (input_dims, active_dims=None, name='SE')
    @@ -525,7 +628,7 @@

    Methods

    Expand source code -Browse git +Browse git
    class SquaredExponentialKernel(Kernel):
         def __init__(self, input_dims, active_dims=None, name="SE"):
    @@ -547,11 +650,11 @@ 

    Methods

    Ancestors

    Methods

    -
    +
    def K(self, X1, X2=None)
    @@ -559,7 +662,7 @@

    Methods

    Expand source code -Browse git +Browse git
    def K(self, X1, X2=None):
         # X has shape (data_points,input_dims)
    @@ -583,51 +686,57 @@ 

    Index

    +

    Using the GPU

    +

If a GPU is available through CUDA, it is used automatically for tensor calculations, which can speed up training significantly. To check whether CUDA is supported and which GPU is being used, or to control which CPU or GPU to use, see mogptk.gpr.config, whose functions are accessible through the main MOGPTK namespace (e.g. mogptk.print_gpu_information() or mogptk.use_gpu()).
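As a minimal sketch of how these helpers can be called from the top-level namespace (the no-argument calls below mirror the examples in the sentence above; see mogptk.gpr.config for the full set of options):

import mogptk

# Report whether CUDA is available and which device would be used.
mogptk.print_gpu_information()

# Request GPU execution for tensor calculations (only meaningful when CUDA is available).
mogptk.use_gpu()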

    Advice on training

    Visualization and interpretation

    Expand source code -Browse git +Browse git
    """
     .. include:: ./documentation.md
    @@ -75,18 +77,16 @@ 

    Visualization and interpretation

 from .serie import *
 from .data import *
 from .dataset import *
-
 from .model import *
-from .sm import *
-from .mosm import *
-from .csm import *
-from .conv import *
-from .sm_lmc import *
+from .models.sm import *
+from .models.mosm import *
+from .models.csm import *
+from .models.sm_lmc import *
+from .models.conv import *
 from .plot import *
 from .errors import *
-
-from .kernels.config import *
+from .gpr.config import *
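Because the model modules now live under mogptk.models but are still star-imported here, user code that picks up model classes from the top-level namespace should be unaffected. A small sketch, assuming the star-imports keep re-exporting the classes as before:

import mogptk

# Both spellings refer to the same class after the restructuring;
# mogptk.models.mosm follows the new package layout above.
assert mogptk.MOSM is mogptk.models.mosm.MOSM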
    @@ -96,14 +96,6 @@

    Sub-modules

-mogptk.conv
-mogptk.csm
 mogptk.data
@@ -116,7 +108,7 @@

Sub-modules

-mogptk.kernels
+mogptk.gpr
@@ -124,7 +116,7 @@

Sub-modules

-mogptk.mosm
+mogptk.models
@@ -136,14 +128,6 @@

Sub-modules

-mogptk.sm
-mogptk.sm_lmc
    @@ -163,6 +147,7 @@

    Index

  • Transformations
  • +
  • Using the GPU
  • Advice on training
  • Visualization and interpretation
  • @@ -171,18 +156,14 @@

    Index

  • Sub-modules

diff --git a/docs/model.html b/docs/model.html
index 110001ba..33866972 100644
--- a/docs/model.html
+++ b/docs/model.html
@@ -25,7 +25,7 @@

    Module mogptk.model

    Expand source code -Browse git +Browse git
    import os
     import time
    @@ -39,7 +39,7 @@ 

    Module mogptk.model

 from .serie import Serie
 from .dataset import DataSet
-from .kernels import GPR, CholeskyException, Kernel, MultiOutputKernel, IndependentMultiOutputKernel
+from .gpr import GPR, CholeskyException, Kernel, MultiOutputKernel, IndependentMultiOutputKernel
 from .errors import mean_absolute_error, mean_absolute_percentage_error, root_mean_squared_error

 logger = logging.getLogger('mogptk')
@@ -72,9 +72,9 @@

    Module mogptk.model

     Args:
         dataset (mogptk.dataset.DataSet, mogptk.data.Data): `DataSet` with `Data` objects for all the channels. When a (list or dict of) `Data` object is passed, it will automatically be converted to a `DataSet`.
-        kernel (mogptk.kernels.kernel.Kernel): The kernel class.
+        kernel (mogptk.gpr.kernel.Kernel): The kernel class.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.
     """
@@ -122,7 +122,7 @@

    Module mogptk.model

         Returns all parameters of the kernel.

         Returns:
-            list: mogptk.kernels.parameter.Parameter
+            list: mogptk.gpr.parameter.Parameter

         Examples:
             >>> params = model.get_parameters()
@@ -496,7 +496,7 @@

    Module mogptk.model

             else:
                 X[m*n:(m+1)*n,1] = np.linspace(start[m], end[m], n)

-        return self.model.K(X).detach().numpy()
+        return self.model.K(X)

     def plot(self, start=None, end=None, n=31, title=None, figsize=(12,12)):
         """
@@ -592,7 +592,7 @@

    Examples

    Expand source code -Browse git +Browse git
    def LoadModel(filename):
         """
    @@ -622,7 +622,7 @@ 

    Classes

    Expand source code -Browse git +Browse git
    class Exact:
         """
    @@ -641,7 +641,7 @@ 

    Methods

    Expand source code -Browse git +Browse git
    def build(self, kernel, x, y, mean=None, name=None):
         return GPR(kernel, x, y, mean=mean, name=name)
    @@ -659,11 +659,11 @@

    Args

    dataset : DataSet, Data
    DataSet with Data objects for all the channels. When a (list or dict of) Data object is passed, it will automatically be converted to a DataSet.
    -
    kernel : Kernel
    +
    kernel : Kernel
    The kernel class.
    model
    Gaussian process model to use, such as Exact.
    -
    mean : Mean
    +
    mean : Mean
    The mean class.
    name : str
    Name of the model.
    @@ -671,7 +671,7 @@

    Args

    Expand source code -Browse git +Browse git
    class Model:
         def __init__(self, dataset, kernel, model=Exact(), mean=None, name=None):
    @@ -680,9 +680,9 @@ 

    Args

     Args:
         dataset (mogptk.dataset.DataSet, mogptk.data.Data): `DataSet` with `Data` objects for all the channels. When a (list or dict of) `Data` object is passed, it will automatically be converted to a `DataSet`.
-        kernel (mogptk.kernels.kernel.Kernel): The kernel class.
+        kernel (mogptk.gpr.kernel.Kernel): The kernel class.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.
     """
@@ -730,7 +730,7 @@

    Args

         Returns all parameters of the kernel.

         Returns:
-            list: mogptk.kernels.parameter.Parameter
+            list: mogptk.gpr.parameter.Parameter

         Examples:
             >>> params = model.get_parameters()
@@ -1104,7 +1104,7 @@

    Args

             else:
                 X[m*n:(m+1)*n,1] = np.linspace(start[m], end[m], n)

-        return self.model.K(X).detach().numpy()
+        return self.model.K(X)

     def plot(self, start=None, end=None, n=31, title=None, figsize=(12,12)):
         """
@@ -1150,11 +1150,11 @@

    Args

    Subclasses

    Methods

    @@ -1179,7 +1179,7 @@

    Examples

    Expand source code -Browse git +Browse git
    def error(self, method='MAE'):
         """
    @@ -1229,7 +1229,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_gram_matrix(self, start=None, end=None, n=31):
         """
    @@ -1265,7 +1265,7 @@ 

    Examples

             else:
                 X[m*n:(m+1)*n,1] = np.linspace(start[m], end[m], n)

-        return self.model.K(X).detach().numpy()
+        return self.model.K(X)
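Note that get_gram_matrix() no longer converts its result before returning it. If calling code relied on receiving a NumPy array, a defensive wrapper such as the sketch below (a hypothetical helper, not part of MOGPTK, assuming the result may be a torch tensor) keeps it working:

import numpy as np

def gram_as_numpy(model, start=None, end=None, n=31):
    # Illustrative helper: return the Gram matrix of a mogptk.Model as a NumPy array,
    # whether get_gram_matrix() hands back a NumPy array or a torch tensor.
    K = model.get_gram_matrix(start=start, end=end, n=n)
    if hasattr(K, 'detach'):          # torch tensor
        K = K.detach().cpu().numpy()
    return np.asarray(K)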
    @@ -1276,8 +1276,7 @@

    Examples

    Returns

    list
    -
    mogptk.kernels.parameter.Parameter -
    +
    mogptk.gpr.parameter.Parameter

    Examples

    >>> params = model.get_parameters()
    @@ -1285,14 +1284,14 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def get_parameters(self):
         """
         Returns all parameters of the kernel.
     
         Returns:
    -        list: mogptk.kernels.parameter.Parameter    
    +        list: mogptk.gpr.parameter.Parameter
     
         Examples:
             >>> params = model.get_parameters()
    @@ -1316,7 +1315,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def log_marginal_likelihood(self):
         """
    @@ -1347,7 +1346,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def loss(self):
         """
    @@ -1390,7 +1389,7 @@ 

    Returns

    Expand source code -Browse git +Browse git
    def plot(self, start=None, end=None, n=31, title=None, figsize=(12,12)):
         """
    @@ -1460,7 +1459,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def plot_prediction(self, title=None, figsize=None, legend=True, transformed=False):
         """
    @@ -1506,7 +1505,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def predict(self, X=None, sigma=2.0, transformed=False):
         """
    @@ -1575,7 +1574,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def print_parameters(self):
         """
    @@ -1603,7 +1602,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def save(self, filename):
         """
    @@ -1657,7 +1656,7 @@ 

    Examples

    Expand source code -Browse git +Browse git
    def train(
         self,
    diff --git a/docs/conv.html b/docs/models/conv.html
    similarity index 86%
    rename from docs/conv.html
    rename to docs/models/conv.html
    index 2c2e13a0..c14908b3 100644
    --- a/docs/conv.html
    +++ b/docs/models/conv.html
    @@ -4,7 +4,7 @@
     
     
     
    -mogptk.conv API documentation
    +mogptk.models.conv API documentation
     
     
     
    @@ -19,19 +19,19 @@
     
    -

    Module mogptk.conv

    +

    Module mogptk.models.conv

    Expand source code -Browse git +Browse git
    import numpy as np
     
    -from .dataset import DataSet
    -from .model import Model, Exact, logger
    -from .kernels import GaussianConvolutionProcessKernel, MixtureKernel
    +from ..dataset import DataSet
    +from ..model import Model, Exact, logger
    +from ..gpr import GaussianConvolutionProcessKernel, MixtureKernel
     
     class CONV(Model):
         """
    @@ -41,13 +41,13 @@ 

    Module mogptk.conv

         dataset (mogptk.dataset.DataSet): `DataSet` object of data for all channels.
         Q (int): Number of components.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -152,7 +152,7 @@

    Module mogptk.conv

    Classes

    -
    +
    class CONV (dataset, Q=1, model=<mogptk.model.Exact object>, mean=None, name='CONV')
    @@ -160,13 +160,13 @@

    Classes

    CONV is the Convolutional Gaussian kernel with Q components [1]. The parameters will be randomly instantiated, use init_parameters() to initialize the parameters to reasonable values for the current data set.

    Args

    -
    dataset : DataSet
    +
    dataset : DataSet
    DataSet object of data for all channels.
    Q : int
    Number of components.
    model
    -
    Gaussian process model to use, such as Exact.
    -
    mean : Mean
    +
    Gaussian process model to use, such as Exact.
    +
    mean : Mean
    The mean class.
    name : str
    Name of the model.
    @@ -176,9 +176,9 @@

    Attributes

    dataset
    The associated mogptk.dataset.DataSet.
    model
    -
    The mogptk.kernels.model.Model.
    +
    The mogptk.gpr.model.Model.
    kernel
    -
    The mogptk.kernels.kernel.Kernel.
    +
    The mogptk.gpr.kernel.Kernel.

    Examples:

    >>> import numpy as np
    @@ -199,13 +199,13 @@ 

    Attributes

    Model is the base class for multi-output Gaussian process models.

    Args

    -
    dataset : DataSet, Data
    +
    dataset : DataSet, Data
    DataSet with Data objects for all the channels. When a (list or dict of) Data object is passed, it will automatically be converted to a DataSet.
    -
    kernel : Kernel
    +
    kernel : Kernel
    The kernel class.
    model
    -
    Gaussian process model to use, such as Exact.
    -
    mean : Mean
    +
    Gaussian process model to use, such as Exact.
    +
    mean : Mean
    The mean class.
    name : str
    Name of the model.
    @@ -213,7 +213,7 @@

    Args

    Expand source code -Browse git +Browse git
    class CONV(Model):
         """
    @@ -223,13 +223,13 @@ 

    Args

         dataset (mogptk.dataset.DataSet): `DataSet` object of data for all channels.
         Q (int): Number of components.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -326,11 +326,11 @@

    Args

    Ancestors

    Methods

    -
    +
    def init_parameters(self, method='SM', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False)
    @@ -359,7 +359,7 @@

    Args

    Expand source code -Browse git +Browse git
    def init_parameters(self, method='SM', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False):
         """
    @@ -421,19 +421,19 @@ 

    Args

    Inherited members

    @@ -449,15 +449,15 @@

    Index

    • Super-module

    • Classes

diff --git a/docs/csm.html b/docs/models/csm.html
similarity index 87%
rename from docs/csm.html
rename to docs/models/csm.html
index dc39af11..2d0f1e35 100644
--- a/docs/csm.html
+++ b/docs/models/csm.html
@@ -4,7 +4,7 @@
-mogptk.csm API documentation
+mogptk.models.csm API documentation
@@ -19,19 +19,19 @@
      -

      Module mogptk.csm

      +

      Module mogptk.models.csm

      Expand source code -Browse git +Browse git
      import numpy as np
       
      -from .dataset import DataSet
      -from .model import Model, Exact, logger
      -from .kernels import CrossSpectralKernel, MixtureKernel
      +from ..dataset import DataSet
      +from ..model import Model, Exact, logger
      +from ..gpr import CrossSpectralKernel, MixtureKernel
       
       class CSM(Model):
           """
      @@ -42,13 +42,13 @@ 

      Module mogptk.csm

         Q (int): Number of components.
         Rq (int): Number of subcomponents.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -160,7 +160,7 @@

      Module mogptk.csm

      Classes

      -
      +
      class CSM (dataset, Q=1, Rq=1, model=<mogptk.model.Exact object>, mean=None, name='CSM')
      @@ -168,15 +168,15 @@

      Classes

      Cross Spectral Mixture kernel [1] with Q components and Rq latent functions. The parameters will be randomly instantiated, use init_parameters() to initialize the parameters to reasonable values for the current data set.

      Args

      -
      dataset : DataSet
      +
      dataset : DataSet
      DataSet object of data for all channels.
      Q : int
      Number of components.
      Rq : int
      Number of subcomponents.
      model
      -
      Gaussian process model to use, such as Exact.
      -
      mean : Mean
      +
      Gaussian process model to use, such as Exact.
      +
      mean : Mean
      The mean class.
      name : str
      Name of the model.
      @@ -186,9 +186,9 @@

      Attributes

      dataset
      The associated mogptk.dataset.DataSet.
      model
      -
      The mogptk.kernels.model.Model.
      +
      The mogptk.gpr.model.Model.
      kernel
      -
      The mogptk.kernels.kernel.Kernel.
      +
      The mogptk.gpr.kernel.Kernel.

      Examples:

      >>> import numpy as np
      @@ -209,13 +209,13 @@ 

      Attributes

      Model is the base class for multi-output Gaussian process models.

      Args

      -
      dataset : DataSet, Data
      +
      dataset : DataSet, Data
      DataSet with Data objects for all the channels. When a (list or dict of) Data object is passed, it will automatically be converted to a DataSet.
      -
      kernel : Kernel
      +
      kernel : Kernel
      The kernel class.
      model
      -
      Gaussian process model to use, such as Exact.
      -
      mean : Mean
      +
      Gaussian process model to use, such as Exact.
      +
      mean : Mean
      The mean class.
      name : str
      Name of the model.
      @@ -223,7 +223,7 @@

      Args

      Expand source code -Browse git +Browse git
      class CSM(Model):
           """
      @@ -234,13 +234,13 @@ 

      Args

         Q (int): Number of components.
         Rq (int): Number of subcomponents.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -344,11 +344,11 @@

      Args

      Ancestors

      Methods

      -
      +
      def init_parameters(self, method='BNSE', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False)
      @@ -377,7 +377,7 @@

      Args

      Expand source code -Browse git +Browse git
      def init_parameters(self, method='BNSE', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False):
           """
      @@ -449,19 +449,19 @@ 

      Args

      Inherited members

      @@ -477,15 +477,15 @@

      Index

      • Super-module

      • Classes

diff --git a/docs/models/index.html b/docs/models/index.html
new file mode 100644
index 00000000..1986054f
--- /dev/null
+++ b/docs/models/index.html
@@ -0,0 +1,87 @@
+mogptk.models API documentation
\ No newline at end of file
diff --git a/docs/mosm.html b/docs/models/mosm.html
similarity index 89%
rename from docs/mosm.html
rename to docs/models/mosm.html
index f255666a..49ed1c9a 100644
--- a/docs/mosm.html
+++ b/docs/models/mosm.html
@@ -4,7 +4,7 @@
-mogptk.mosm API documentation
+mogptk.models.mosm API documentation
@@ -19,21 +19,21 @@
        -

        Module mogptk.mosm

        +

        Module mogptk.models.mosm

        Expand source code -Browse git +Browse git
        import numpy as np
         import matplotlib.pyplot as plt
         
        -from .dataset import DataSet
        -from .model import Model, Exact, logger
        -from .kernels import MultiOutputSpectralKernel, MixtureKernel
        -from .plot import plot_spectrum
        +from ..dataset import DataSet
        +from ..model import Model, Exact, logger
        +from ..gpr import MultiOutputSpectralKernel, MixtureKernel
        +from ..plot import plot_spectrum
         
         class MOSM(Model):
             """
        @@ -43,13 +43,13 @@ 

        Module mogptk.mosm

         dataset (mogptk.dataset.DataSet): `DataSet` object of data for all channels.
         Q (int): Number of components.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Atributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
         >>> import numpy as np
@@ -312,7 +312,7 @@

        Module mogptk.mosm

        Classes

        -
        +
        class MOSM (dataset, Q=1, model=<mogptk.model.Exact object>, mean=None, name='MOSM')
        @@ -320,13 +320,13 @@

        Classes

        Multi-Output Spectral Mixture kernel with Q components as proposed by [1]. The parameters will be randomly instantiated, use init_parameters() to initialize the parameters to reasonable values for the current data set.

        Args

        -
        dataset : DataSet
        +
        dataset : DataSet
        DataSet object of data for all channels.
        Q : int
        Number of components.
        model
        -
        Gaussian process model to use, such as Exact.
        -
        mean : Mean
        +
        Gaussian process model to use, such as Exact.
        +
        mean : Mean
        The mean class.
        name : str
        Name of the model.
        @@ -336,9 +336,9 @@

        Atributes

        dataset
        The associated mogptk.dataset.DataSet.
        model
        -
        The mogptk.kernels.model.Model.
        +
        The mogptk.gpr.model.Model.
        kernel
        -
        The mogptk.kernels.kernel.Kernel.
        +
        The mogptk.gpr.kernel.Kernel.

        Examples:

        >>> import numpy as np
        @@ -359,13 +359,13 @@ 

        Atributes

        Model is the base class for multi-output Gaussian process models.

        Args

        -
        dataset : DataSet, Data
        +
        dataset : DataSet, Data
        DataSet with Data objects for all the channels. When a (list or dict of) Data object is passed, it will automatically be converted to a DataSet.
        -
        kernel : Kernel
        +
        kernel : Kernel
        The kernel class.
        model
        -
        Gaussian process model to use, such as Exact.
        -
        mean : Mean
        +
        Gaussian process model to use, such as Exact.
        +
        mean : Mean
        The mean class.
        name : str
        Name of the model.
        @@ -373,7 +373,7 @@

        Args

        Expand source code -Browse git +Browse git
        class MOSM(Model):
             """
        @@ -383,13 +383,13 @@ 

        Args

         dataset (mogptk.dataset.DataSet): `DataSet` object of data for all channels.
         Q (int): Number of components.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Atributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
         >>> import numpy as np
@@ -644,11 +644,11 @@

        Args

        Ancestors

        Methods

        -
        +
        def check(self)
        @@ -656,7 +656,7 @@

        Methods

        Expand source code -Browse git +Browse git
        def check(self):
             """
        @@ -670,7 +670,7 @@ 

        Methods

        print("‣ MOSM approaches RBF kernel for q=%d in channel='%s'" % (q, self.dataset[j].name))
        -
        +
        def init_parameters(self, method='BNSE', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False)
        @@ -699,7 +699,7 @@

        Args

        Expand source code -Browse git +Browse git
        def init_parameters(self, method='BNSE', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False):
             """
        @@ -766,7 +766,7 @@ 

        Args

        self.model.kernel[q].noise.assign(noise)
        -
        +
        def plot_cross_spectrum(self, title=None, figsize=(12, 12))
        @@ -774,7 +774,7 @@

        Args

        Expand source code -Browse git +Browse git
        def plot_cross_spectrum(self, title=None, figsize=(12,12)):
             """
        @@ -808,7 +808,7 @@ 

        Args

        return fig, axes
        -
        +
        def plot_spectrum(self, title=None)
        @@ -816,7 +816,7 @@

        Args

        Expand source code -Browse git +Browse git
        def plot_spectrum(self, title=None):
             """
        @@ -835,19 +835,19 @@ 

        Args

        Inherited members
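For context, an end-to-end sketch of how this class is typically driven. The Data construction and the bare train() call are assumptions, while MOSM(), init_parameters(), predict(), plot_prediction() and plot_cross_spectrum() follow the signatures documented above; per the Model docstring, a list of Data objects is converted to a DataSet automatically.

import numpy as np
import mogptk

# Two toy channels sharing a common frequency (Data(x, y) construction is assumed).
t = np.linspace(0.0, 10.0, 200)
channels = [
    mogptk.Data(t, np.sin(2.0*np.pi*0.5*t)),
    mogptk.Data(t, 0.5*np.cos(2.0*np.pi*0.5*t)),
]

model = mogptk.MOSM(channels, Q=2)     # list of Data is converted to a DataSet
model.init_parameters(method='BNSE')   # default initialization per the signature above
model.train()                          # training options omitted (assumed defaults)
model.predict()
model.plot_prediction()
model.plot_cross_spectrum()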

        @@ -863,18 +863,18 @@

        Index

        • Super-module

        • Classes

diff --git a/docs/sm.html b/docs/models/sm.html
similarity index 87%
rename from docs/sm.html
rename to docs/models/sm.html
index 858b3d7c..b653f4a0 100644
--- a/docs/sm.html
+++ b/docs/models/sm.html
@@ -4,7 +4,7 @@
-mogptk.sm API documentation
+mogptk.models.sm API documentation
@@ -19,20 +19,20 @@
          -

          Module mogptk.sm

          +

          Module mogptk.models.sm

          Expand source code -Browse git +Browse git
          import numpy as np
           
          -from .dataset import DataSet
          -from .model import Model, Exact, logger
          -from .kernels import SpectralKernel, IndependentMultiOutputKernel, MixtureKernel
          -from .plot import plot_spectrum
          +from ..dataset import DataSet
          +from ..model import Model, Exact, logger
          +from ..gpr import SpectralKernel, IndependentMultiOutputKernel, MixtureKernel
          +from ..plot import plot_spectrum
           
           class SM(Model):
               """
          @@ -42,13 +42,13 @@ 

          Module mogptk.sm

         dataset (mogptk.dataset.DataSet): `DataSet` object of data for all channels.
         Q (int): Number of components.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -182,7 +182,7 @@

          Module mogptk.sm

          Classes

          -
          +
          class SM (dataset, Q=1, model=<mogptk.model.Exact object>, mean=None, name='SM')
          @@ -190,13 +190,13 @@

          Classes

          Independent Spectral Mixture kernels per channel. The spectral mixture kernel is proposed by [1]. The parameters will be randomly instantiated, use init_parameters() to initialize the parameters to reasonable values for the current data set.

          Args

          -
          dataset : DataSet
          +
          dataset : DataSet
          DataSet object of data for all channels.
          Q : int
          Number of components.
          model
          -
          Gaussian process model to use, such as Exact.
          -
          mean : Mean
          +
          Gaussian process model to use, such as Exact.
          +
          mean : Mean
          The mean class.
          name : str
          Name of the model.
          @@ -206,9 +206,9 @@

          Attributes

          dataset
          The associated mogptk.dataset.DataSet.
          model
          -
          The mogptk.kernels.model.Model.
          +
          The mogptk.gpr.model.Model.
          kernel
          -
          The mogptk.kernels.kernel.Kernel.
          +
          The mogptk.gpr.kernel.Kernel.

          Examples:

          >>> import numpy as np
          @@ -228,13 +228,13 @@ 

          Attributes

          Model is the base class for multi-output Gaussian process models.

          Args

          -
          dataset : DataSet, Data
          +
          dataset : DataSet, Data
          DataSet with Data objects for all the channels. When a (list or dict of) Data object is passed, it will automatically be converted to a DataSet.
          -
          kernel : Kernel
          +
          kernel : Kernel
          The kernel class.
          model
          -
          Gaussian process model to use, such as Exact.
          -
          mean : Mean
          +
          Gaussian process model to use, such as Exact.
          +
          mean : Mean
          The mean class.
          name : str
          Name of the model.
          @@ -242,7 +242,7 @@

          Args

          Expand source code -Browse git +Browse git
          class SM(Model):
               """
          @@ -252,13 +252,13 @@ 

          Args

         dataset (mogptk.dataset.DataSet): `DataSet` object of data for all channels.
         Q (int): Number of components.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -384,11 +384,11 @@

          Args

          Ancestors

          Methods

          -
          +
          def init_parameters(self, method='BNSE')
          @@ -407,7 +407,7 @@

          Args

          Expand source code -Browse git +Browse git
          def init_parameters(self, method='BNSE'):
               """
          @@ -488,7 +488,7 @@ 

          Args

          self.model.kernel[j][q].variance.assign(variances[j][q,:])
          -
          +
          def plot_spectrum(self, title=None)
          @@ -496,7 +496,7 @@

          Args

          Expand source code -Browse git +Browse git
          def plot_spectrum(self, title=None):
               """
          @@ -516,19 +516,19 @@ 

          Args

          Inherited members

          @@ -544,16 +544,16 @@

          Index

          • Super-module

          • Classes

diff --git a/docs/sm_lmc.html b/docs/models/sm_lmc.html
similarity index 87%
rename from docs/sm_lmc.html
rename to docs/models/sm_lmc.html
index 7a67e786..0081b2cc 100644
--- a/docs/sm_lmc.html
+++ b/docs/models/sm_lmc.html
@@ -4,7 +4,7 @@
-mogptk.sm_lmc API documentation
+mogptk.models.sm_lmc API documentation
@@ -19,19 +19,19 @@
            -

            Module mogptk.sm_lmc

            +

            Module mogptk.models.sm_lmc

            Expand source code -Browse git +Browse git
            import numpy as np
             
            -from .dataset import DataSet
            -from .model import Model, Exact, logger
            -from .kernels import LinearModelOfCoregionalizationKernel, SpectralKernel
            +from ..dataset import DataSet
            +from ..model import Model, Exact, logger
            +from ..gpr import LinearModelOfCoregionalizationKernel, SpectralKernel
             
             class SM_LMC(Model):
                 """
            @@ -42,13 +42,13 @@ 

            Module mogptk.sm_lmc

         Q (int): Number of components.
         Rq (int): Number of subcomponents.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -162,7 +162,7 @@

            Module mogptk.sm_lmc

            Classes

            -
            +
            class SM_LMC (dataset, Q=1, Rq=1, model=<mogptk.model.Exact object>, mean=None, name='SM-LMC')
            @@ -170,15 +170,15 @@

            Classes

            Spectral Mixture Linear Model of Coregionalization kernel with Q components and Rq latent functions. The SM kernel as proposed by [1] is combined with the LMC kernel as proposed by [2]. The parameters will be randomly instantiated, use init_parameters() to initialize the parameters to reasonable values for the current data set.

            Args

            -
            dataset : DataSet
            +
            dataset : DataSet
            DataSet object of data for all channels.
            Q : int
            Number of components.
            Rq : int
            Number of subcomponents.
            model
            -
            Gaussian process model to use, such as Exact.
            -
            mean : Mean
            +
            Gaussian process model to use, such as Exact.
            +
            mean : Mean
            The mean class.
            name : str
            Name of the model.
            @@ -188,9 +188,9 @@

            Attributes

            dataset
            The associated mogptk.dataset.DataSet.
            model
            -
            The mogptk.kernels.model.Model.
            +
            The mogptk.gpr.model.Model.
            kernel
            -
            The mogptk.kernels.kernel.Kernel.
            +
            The mogptk.gpr.kernel.Kernel.

            Examples:

            >>> import numpy as np
            @@ -212,13 +212,13 @@ 

            Attributes

            Model is the base class for multi-output Gaussian process models.

            Args

            -
            dataset : DataSet, Data
            +
            dataset : DataSet, Data
            DataSet with Data objects for all the channels. When a (list or dict of) Data object is passed, it will automatically be converted to a DataSet.
            -
            kernel : Kernel
            +
            kernel : Kernel
            The kernel class.
            model
            -
            Gaussian process model to use, such as Exact.
            -
            mean : Mean
            +
            Gaussian process model to use, such as Exact.
            +
            mean : Mean
            The mean class.
            name : str
            Name of the model.
            @@ -226,7 +226,7 @@

            Args

            Expand source code -Browse git +Browse git
            class SM_LMC(Model):
                 """
            @@ -237,13 +237,13 @@ 

            Args

         Q (int): Number of components.
         Rq (int): Number of subcomponents.
         model: Gaussian process model to use, such as `mogptk.model.Exact`.
-        mean (mogptk.kernels.mean.Mean): The mean class.
+        mean (mogptk.gpr.mean.Mean): The mean class.
         name (str): Name of the model.

     Attributes:
         dataset: The associated mogptk.dataset.DataSet.
-        model: The mogptk.kernels.model.Model.
-        kernel: The mogptk.kernels.kernel.Kernel.
+        model: The mogptk.gpr.model.Model.
+        kernel: The mogptk.gpr.kernel.Kernel.

     Examples:
@@ -349,11 +349,11 @@

            Args

            Ancestors

            Methods

            -
            +
            def init_parameters(self, method='BNSE', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False)
            @@ -382,7 +382,7 @@

            Args

            Expand source code -Browse git +Browse git
            def init_parameters(self, method='BNSE', sm_init='BNSE', sm_method='Adam', sm_iters=100, sm_params={}, sm_plot=False):
                 """
            @@ -451,19 +451,19 @@ 

            Args

            Inherited members

            @@ -479,15 +479,15 @@

            Index

            • Super-module

            • Classes

diff --git a/docs/plot.html b/docs/plot.html
index 6aba7416..b297a02a 100644
--- a/docs/plot.html
+++ b/docs/plot.html
@@ -25,7 +25,7 @@

              Module mogptk.plot

              Expand source code -Browse git +Browse git
              import numpy as np
               import matplotlib.pyplot as plt
              @@ -118,7 +118,7 @@ 

              Functions

              Expand source code -Browse git +Browse git
              def plot_spectrum(means, scales, weights=None, nyquist=None, titles=None, show=True, filename=None, title=None):
                   """
              diff --git a/docs/serie.html b/docs/serie.html
              index 54d0bd42..6e320c47 100644
              --- a/docs/serie.html
              +++ b/docs/serie.html
              @@ -25,7 +25,7 @@ 

              Module mogptk.serie

              Expand source code -Browse git +Browse git
              import numpy as np
               
              @@ -248,7 +248,7 @@ 

              Classes

              Expand source code -Browse git +Browse git
              class Serie(np.ndarray):
                   """
              @@ -345,7 +345,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def apply(self, transformers, x=None):
                   if not isinstance(transformers, list):
              @@ -368,7 +368,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def detransform(self, array, x=None):
                   for t in self.transformers[::-1]:
              @@ -384,7 +384,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def get_time_unit(self):
                   if not self.is_datetime64():
              @@ -405,7 +405,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def is_datetime64(self):
                   return np.issubdtype(self.dtype, np.datetime64)
              @@ -419,7 +419,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def transform(self, array, x=None):
                   array = array.astype(self.dtype).astype(np.float64)
              @@ -438,7 +438,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              class TransformBase:
                   """
              @@ -472,7 +472,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def backward(self, y, x=None):
                   raise NotImplementedError
              @@ -486,7 +486,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def forward(self, y, x=None):
                   raise NotImplementedError
              @@ -500,7 +500,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def set_data(self, data):
                   pass
              @@ -522,7 +522,7 @@

              Args

              Expand source code -Browse git +Browse git
              class TransformDetrend(TransformBase):
                   """
              @@ -568,7 +568,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def backward(self, y, x):
                   if x is None:
              @@ -585,7 +585,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def forward(self, y, x):
                   if x is None:
              @@ -602,7 +602,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def set_data(self, data):
                   if data.get_input_dims() != 1:
              @@ -623,7 +623,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              class TransformLinear(TransformBase):
                   """
              @@ -656,7 +656,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def backward(self, y, x=None):
                   return self.bias + self.slope*y
              @@ -670,7 +670,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def forward(self, y, x=None):
                   return (y-self.bias)/self.slope
              @@ -684,7 +684,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def set_data(self, data):
                   pass
              @@ -700,7 +700,7 @@

              Methods

              Expand source code -Browse git +Browse git
              class TransformLog(TransformBase):
                   """
              @@ -733,7 +733,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def backward(self, y, x=None):
                   return np.exp(y + self.mean) - self.shift
              @@ -747,7 +747,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def forward(self, y, x=None):
                   return np.log(y + self.shift) - self.mean
              @@ -761,7 +761,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def set_data(self, data):
                   self.shift = 1 - data.Y.transformed.min()
              @@ -778,7 +778,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              class TransformNormalize(TransformBase):
                   """
              @@ -811,7 +811,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def backward(self, y, x=None):
                   return (y+1.0)/2.0*(self.ymax-self.ymin)+self.ymin
              @@ -825,7 +825,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def forward(self, y, x=None):
                   return -1.0 + 2.0*(y-self.ymin)/(self.ymax-self.ymin)
              @@ -839,7 +839,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def set_data(self, data):
                   self.ymin = np.amin(data.Y.transformed[data.mask])
              @@ -856,7 +856,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              class TransformStandard(TransformBase):
                   """
              @@ -890,7 +890,7 @@ 

              Methods

              Expand source code -Browse git +Browse git
              def backward(self, y, x=None):
                   return (y * self.std) + self.mean
              @@ -904,7 +904,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def forward(self, y, x=None):
                   return (y - self.mean) / self.std
              @@ -918,7 +918,7 @@

              Methods

              Expand source code -Browse git +Browse git
              def set_data(self, data):
                   # take only the non-removed observations
              diff --git a/setup.py b/setup.py
              index ff3bb5f6..d69b3997 100644
              --- a/setup.py
              +++ b/setup.py
              @@ -16,7 +16,7 @@
                   long_description = f.read()
               
               setup(name='mogptk',
              -      version='0.2.1',
              +      version='0.2.2',
                     description='Multi-Output Gaussian Process ToolKit',
                     long_description=long_description,
                     long_description_content_type='text/markdown',
              @@ -24,7 +24,7 @@
                     author='Taco de Wolff, Alejandro Cuevas, Felipe Tobar',
                     author_email='tacodewolff@gmail.com',
                     license='MIT',
              -      packages=['mogptk', 'mogptk.kernels'],
              +      packages=['mogptk', 'mogptk.gpr', 'mogptk.models'],
                     keywords=['MOGP', 'MOSM', 'GP', 'Gaussian Process', 'Multi-Output', 'Tobar', 'Parra'],
                     python_requires='>=3.6',
                     install_requires=requirements,
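Since the distribution now ships three packages instead of two, a quick post-install sanity check (purely illustrative) is to import the renamed and added subpackages directly:

import mogptk
import mogptk.gpr      # formerly mogptk.kernels
import mogptk.models   # new home of SM, MOSM, CSM, SM_LMC and CONV

print("mogptk subpackages gpr and models import cleanly")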