diff --git a/src/uncertainpy/_version.py b/src/uncertainpy/_version.py index cd7e2da5..10aa336c 100644 --- a/src/uncertainpy/_version.py +++ b/src/uncertainpy/_version.py @@ -1 +1 @@ -__version__ = "1.2.3" \ No newline at end of file +__version__ = "1.2.3" diff --git a/src/uncertainpy/core/uncertainty_calculations.py b/src/uncertainpy/core/uncertainty_calculations.py index 242a18be..9fe87de3 100644 --- a/src/uncertainpy/core/uncertainty_calculations.py +++ b/src/uncertainpy/core/uncertainty_calculations.py @@ -14,6 +14,8 @@ from ..utils.logger import get_logger + + class UncertaintyCalculations(ParameterBase): """ Perform the calculations for the uncertainty quantification and @@ -139,7 +141,33 @@ def parameters(self, new_parameters): self.runmodel.parameters = self.parameters - + def calc_sens(self, dim, poly, uhat): + variance = np.sum(uhat[1:]**2) + main = np.zeros(dim) + total = np.zeros(dim) + #Compute sens + for idx, pol in enumerate(poly[1:]): + add_total = [False,]*dim + add_main = [True,]*dim + for term in pol.exponents: + for var in range(dim): + if term[var] > 0: + add_total[var] = True + add_main[var] = add_main[var] and True + add_main[0:var] = [False,]*var + try: + add_main[var+1::] = [False,]*(dim-var) + except IndexError: + pass + for var in range(dim): + if add_main[var]: + main[var] += uhat[idx+1]**2 + if add_total[var]: + total[var] += uhat[idx+1]**2 + + main = main/variance + total = total/variance + return main, total def convert_uncertain_parameters(self, uncertain_parameters=None): @@ -392,7 +420,54 @@ def create_masked_nodes_weights(self, data, feature, nodes, weights): return masked_evaluations, mask, masked_nodes, masked_weights + def get_UQ_samples(self, method, uncertain_parameters, polynomial_order, quadrature_order=None, seed=None): + if method not in ["spectral", "collocation", "MC"]: + raise ValueError("The UQ method needs to be provided (spectral, collocation or MC)") + uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) + distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) + if method == "spectral": + if quadrature_order is None: + raise ValueError("The quadrature_order must be an integer") + P = cp.orth_ttr(polynomial_order, distribution) + nodes, weights = cp.generate_quadrature(quadrature_order, + distribution, + rule="J", + sparse=True) + return uncertain_parameters, distribution, P, nodes, weights + elif method == "collocation": + P = cp.orth_ttr(polynomial_order, distribution, normed=True) + nr_collocation_nodes = quadrature_order + if nr_collocation_nodes is None: + nr_collocation_nodes = 2 * len(P) + 2 + nodes = distribution.sample(nr_collocation_nodes, "M") + return uncertain_parameters, distribution, P, nodes, None + elif method == "MC": + nr_samples = polynomial_order + if nr_samples is None: + raise ValueError("The nr_samples must be an integer") + + if seed is not None: + np.random.seed(seed) + + problem = { + "num_vars": len(uncertain_parameters), + "names": uncertain_parameters, + "bounds": [[0,1]]*len(uncertain_parameters) + } + + # Create the Multivariate normal distribution + dist_R = [] + for parameter in uncertain_parameters: + dist_R.append(cp.Uniform()) + + dist_R = cp.J(*dist_R) + + nr_sobol_samples = int(np.round(nr_samples/2.)) + + nodes_R = saltelli.sample(problem, nr_sobol_samples, calc_second_order=False) + nodes = distribution.inv(dist_R.fwd(nodes_R.transpose())) + return uncertain_parameters, distribution, None, nodes, nr_sobol_samples def create_PCE_spectral(self, @@ 
-478,24 +553,14 @@ def create_PCE_spectral(self, uncertainpy.Data uncertainpy.Parameters """ - uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) - - distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) - - P = cp.orth_ttr(polynomial_order, distribution) - if quadrature_order is None: quadrature_order = polynomial_order + 2 - - - nodes, weights = cp.generate_quadrature(quadrature_order, - distribution, - rule="J", - sparse=True) + uncertain_parameters, distribution, P, nodes, weights = self.get_UQ_samples("spectral", uncertain_parameters, polynomial_order, quadrature_order=quadrature_order) # Running the model data = self.runmodel.run(nodes, uncertain_parameters) + # TODO: import data here instead of running data.method = "polynomial chaos expansion with the pseudo-spectral method. polynomial_order={}, quadrature_order={}".format(polynomial_order, quadrature_order) @@ -614,20 +679,13 @@ def create_PCE_collocation(self, uncertainpy.Parameters """ - uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) - - distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) - - P = cp.orth_ttr(polynomial_order, distribution) - if nr_collocation_nodes is None: - nr_collocation_nodes = 2*len(P) + 2 - - nodes = distribution.sample(nr_collocation_nodes, "M") - + uncertain_parameters, distribution, P, nodes, _ = self.get_UQ_samples("collocation", uncertain_parameters, polynomial_order, quadrature_order=nr_collocation_nodes) # Running the model data = self.runmodel.run(nodes, uncertain_parameters) + # TODO: import data here instead of running + data.method = "polynomial chaos expansion with point collocation. polynomial_order={}, nr_collocation_nodes={}".format(polynomial_order, nr_collocation_nodes) logger = get_logger(self) @@ -643,8 +701,8 @@ def create_PCE_collocation(self, masked_evaluations, mask, masked_nodes = self.create_masked_nodes(data, feature, nodes) if (np.all(mask) or allow_incomplete) and sum(mask) > 0: - U_hat[feature] = cp.fit_regression(P, masked_nodes, - masked_evaluations) + U_hat[feature], approx[feature] = cp.fit_regression(P, masked_nodes, + masked_evaluations, retall=1) elif not allow_incomplete: logger.warning("{}: not all parameter combinations give results.".format(feature) + " No uncertainty quantification is performed since allow_incomplete=False") @@ -656,7 +714,7 @@ def create_PCE_collocation(self, if not np.all(mask): data.incomplete.append(feature) - return U_hat, distribution, data + return U_hat, approx, distribution, data, P def create_PCE_spectral_rosenblatt(self, @@ -966,7 +1024,7 @@ def create_PCE_collocation_rosenblatt(self, return U_hat, dist_R, data - def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): + def analyse_PCE(self, U_hat, approx, distribution, data, P, polynomial_order, nr_samples=10**4, save_samples=False): """ Calculate the statistical metrics from the polynomial chaos approximation. @@ -985,6 +1043,8 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): Number of samples for the Monte Carlo sampling of the polynomial chaos approximation. Default is 10**4. + save_samples: bool, optional + Save samples in feature data to, for example, plot PDFs later. Returns ------- @@ -1024,6 +1084,7 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): logger.info("Only 1 uncertain parameter. 
Sensitivities are not calculated") U_mc = {} + samples = distribution.sample(nr_samples, "M") for feature in tqdm(data, desc="Calculating statistics from PCE", total=len(data)): @@ -1031,19 +1092,25 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): data[feature].mean = cp.E(U_hat[feature], distribution) data[feature].variance = cp.Var(U_hat[feature], distribution) - samples = distribution.sample(nr_samples, "M") + if len(data.uncertain_parameters) > 1: U_mc[feature] = U_hat[feature](*samples) - data[feature].sobol_first = cp.Sens_m(U_hat[feature], distribution) - data[feature].sobol_total = cp.Sens_t(U_hat[feature], distribution) + #data[feature].sobol_first = cp.Sens_m(U_hat[feature], distribution) + e = np.array(U_hat[feature].exponents, dtype=int) + dim = e.shape[1] + poly = P + data[feature].sobol_first, data[feature].sobol_total = self.calc_sens(dim, poly, approx[feature]) + #data[feature].sobol_total = cp.Sens_t(U_hat[feature], distribution) data = self.average_sensitivity(data, sensitivity="sobol_first") data = self.average_sensitivity(data, sensitivity="sobol_total") else: U_mc[feature] = U_hat[feature](samples) + if save_samples: + data[feature].samples = U_mc[feature] data[feature].percentile_5 = np.percentile(U_mc[feature], 5, -1) data[feature].percentile_95 = np.percentile(U_mc[feature], 95, -1) @@ -1194,6 +1261,7 @@ def polynomial_chaos(self, nr_pc_mc_samples=10**4, allow_incomplete=True, seed=None, + save_samples=False, **custom_kwargs): """ Perform an uncertainty quantification and sensitivity analysis @@ -1347,7 +1415,7 @@ def polynomial_chaos(self, nr_collocation_nodes=nr_collocation_nodes, allow_incomplete=allow_incomplete) else: - U_hat, distribution, data = \ + U_hat, approx, distribution, data, P = \ self.create_PCE_collocation(uncertain_parameters=uncertain_parameters, polynomial_order=polynomial_order, nr_collocation_nodes=nr_collocation_nodes, @@ -1380,18 +1448,20 @@ def polynomial_chaos(self, else: raise ValueError("No polynomial chaos method with name {}".format(method)) - data = self.analyse_PCE(U_hat, distribution, data, nr_samples=nr_pc_mc_samples) + data = self.analyse_PCE(U_hat, approx, distribution, data, P, polynomial_order=polynomial_order, + nr_samples=nr_pc_mc_samples, + save_samples=save_samples) data.seed = seed return data - def monte_carlo(self, uncertain_parameters=None, nr_samples=10**4, seed=None, - allow_incomplete=True): + allow_incomplete=True, + save_samples=False): """ Perform an uncertainty quantification using the quasi-Monte Carlo method. @@ -1411,6 +1481,8 @@ def monte_carlo(self, If the uncertainty quantification should be performed for features or models with incomplete evaluations. Default is True. + save_samples: bool, optional + Save samples in feature data to, for example, plot PDFs later. 
Returns ------- @@ -1471,34 +1543,7 @@ def monte_carlo(self, uncertainpy.Parameters """ - if seed is not None: - np.random.seed(seed) - - uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) - - distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) - - # nodes = distribution.sample(nr_samples, "M") - - problem = { - "num_vars": len(uncertain_parameters), - "names": uncertain_parameters, - "bounds": [[0,1]]*len(uncertain_parameters) - } - - # Create the Multivariate normal distribution - dist_R = [] - for parameter in uncertain_parameters: - dist_R.append(cp.Uniform()) - - dist_R = cp.J(*dist_R) - - nr_sobol_samples = int(np.round(nr_samples/2.)) - - nodes_R = saltelli.sample(problem, nr_sobol_samples, calc_second_order=False) - - nodes = distribution.inv(dist_R.fwd(nodes_R.transpose())) - + uncertain_parameters, distribution, _, nodes, nr_sobol_samples = self.get_UQ_samples("MC", uncertain_parameters, nr_samples) data = self.runmodel.run(nodes, uncertain_parameters) @@ -1522,6 +1567,8 @@ def monte_carlo(self, logger = get_logger(self) if (np.all(mask) or allow_incomplete) and sum(mask) > 0: + if save_samples: + data[feature].samples = masked_evaluations data[feature].mean = np.mean(masked_evaluations, 0) data[feature].variance = np.var(masked_evaluations, 0) @@ -1694,4 +1741,4 @@ def average_sensitivity(self, data, sensitivity="sobol_first"): data[feature][sensitivity + "_average"] = np.array(total_sense) - return data \ No newline at end of file + return data diff --git a/src/uncertainpy/data.py b/src/uncertainpy/data.py index 84bbfd67..c6ccf1fb 100644 --- a/src/uncertainpy/data.py +++ b/src/uncertainpy/data.py @@ -2,7 +2,10 @@ import six import os -import collections +try: + from collections import MutableMapping +except ImportError: + from collections.abc import MutableMapping import numpy as np @@ -11,7 +14,7 @@ from ._version import __version__ -class DataFeature(collections.MutableMapping): +class DataFeature(MutableMapping): """ Store the results of each statistical metric calculated from the uncertainty quantification and sensitivity analysis for a single model/feature. @@ -291,7 +294,7 @@ def ndim(self): return None -class Data(collections.MutableMapping): +class Data(MutableMapping): """ Store the results of each statistical metric calculated from the uncertainty quantification and sensitivity analysis for each model/features. @@ -696,7 +699,6 @@ def add_group(group, values, name="evaluation"): for feature in self.data: group = f.create_group(feature) - for statistical_metric in self[feature]: if statistical_metric in ["evaluations", "time"]: if is_regular(self[feature][statistical_metric]): @@ -810,6 +812,9 @@ def append_evaluations(evaluations, group): if "model ignore" in f.attrs: self.model_ignore = f.attrs["model ignore"] + if "samples" in f.attrs: + self.samples = f.attrs["samples"] + for feature in f: self.add_features(str(feature)) diff --git a/src/uncertainpy/parameters.py b/src/uncertainpy/parameters.py index a24c73af..a2df484f 100755 --- a/src/uncertainpy/parameters.py +++ b/src/uncertainpy/parameters.py @@ -5,6 +5,10 @@ import fileinput import sys import collections +try: + from collections import MutableMapping +except ImportError: + from collections.abc import MutableMapping import chaospy as cp @@ -145,7 +149,7 @@ def __str__(self): -class Parameters(collections.MutableMapping): +class Parameters(MutableMapping): """ A collection of parameters. 
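
The calc_sens helper added to UncertaintyCalculations above (and wired into analyse_PCE in place of cp.Sens_m / cp.Sens_t) derives first-order and total Sobol indices directly from the coefficients of the polynomial chaos expansion: assuming an orthonormal basis, the variance is the sum of the squared non-constant coefficients, a coefficient contributes to the total index of every variable appearing in its term, and to the first-order index only when the term depends on a single variable. Below is a minimal numpy-only sketch of the same bookkeeping; it flattens the per-polynomial exponent walk of calc_sens into a single exponent matrix, and the exponents and coefficients are illustrative, not taken from the patch.

    import numpy as np

    def sobol_from_pce(exponents, coefficients):
        """First-order and total Sobol indices from an orthonormal PCE.

        exponents    : (n_terms, dim) integer matrix, row 0 is the constant term.
        coefficients : (n_terms,) expansion coefficients for the same terms.
        """
        exponents = np.asarray(exponents, dtype=int)
        coefficients = np.asarray(coefficients, dtype=float)
        dim = exponents.shape[1]

        # Total variance: sum of squared coefficients of all non-constant terms.
        variance = np.sum(coefficients[1:] ** 2)

        main = np.zeros(dim)
        total = np.zeros(dim)
        for term, coeff in zip(exponents[1:], coefficients[1:]):
            active = term > 0                     # variables present in this term
            total[active] += coeff ** 2           # every active variable collects the term
            if np.count_nonzero(active) == 1:     # univariate term -> first-order index
                main[active] += coeff ** 2

        return main / variance, total / variance

    # Illustrative two-variable expansion with terms 1, x0, x1, x0*x1:
    exponents = [[0, 0], [1, 0], [0, 1], [1, 1]]
    coefficients = [2.0, 0.6, 0.3, 0.1]
    first, tot = sobol_from_pce(exponents, coefficients)
    print(first, tot)
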
diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 77f6b511..a2430455 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -1,17 +1,17 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import glob import os +import itertools import matplotlib.pyplot as plt import numpy as np +import seaborn as sns from .prettyplot import prettyPlot, prettyBar from .prettyplot import spines_color, get_current_colormap -from .prettyplot import get_colormap_tableu20, set_style, get_colormap, reset_style -from .prettyplot import axis_grey, labelsize, fontsize, titlesize, linewidth +from .prettyplot import set_style, get_colormap, reset_style +from .prettyplot import labelsize, fontsize, titlesize, linewidth -import seaborn as sns from ..data import Data from ..utils.logger import setup_module_logger, get_logger @@ -21,7 +21,6 @@ # TODO Change the use of **plot_kwargs to use a dict for specific plotting commands? - class PlotUncertainty(object): """ Plotting the results from the uncertainty quantification and sensitivity @@ -59,7 +58,9 @@ def __init__(self, filename=None, folder="figures/", figureformat=".png", - logger_level="info"): + logger_level="info", + uncertain_names=[], + **kwargs): self._folder = None @@ -69,15 +70,37 @@ def __init__(self, self.features_in_combined_plot = 3 self.data = None + self.uncertain_names = uncertain_names self._logger_level = logger_level + logger = get_logger(self) if filename is not None: self.load(filename) + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") setup_module_logger(class_instance=self, level=logger_level) - + font_settings = ["fontsize", "linewidth", "titlesize", "labelsize", "dpi", "max_legend_size"] + for k in font_settings: + if k in kwargs: + setattr(self, k, kwargs[k]) + else: + if k == "linewidth": + self.linewidth = linewidth + elif k == "titlesize": + self.titlesize = titlesize + elif k == "fontsize": + self.fontsize = fontsize + elif k == "labelsize": + self.labelsize = labelsize + elif k == "dpi": + self.dpi = None + elif k == "max_legend_size": + self.max_legend_size = None def load(self, filename): """ @@ -88,9 +111,29 @@ def load(self, filename): filename : str Name of the file to load data from. """ + logger = get_logger(self) self.data = Data(filename, logger_level=self._logger_level) + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") + def set_data(self, data): + """ + Set data from dictionary. 
+ + Parameters + ---------- + data : dict + Data dictionary + """ + logger = get_logger(self) + self.data = data + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") @property def folder(self): @@ -105,7 +148,6 @@ def folder(self): """ return self._folder - @folder.setter def folder(self, new_folder): self._folder = new_folder @@ -113,9 +155,6 @@ def folder(self, new_folder): if new_folder is not None and not os.path.isdir(new_folder): os.makedirs(new_folder) - - - def all_evaluations(self, foldername="evaluations"): """ Plot all evaluations for all model and features. @@ -129,7 +168,6 @@ def all_evaluations(self, foldername="evaluations"): for feature in self.data.data: self.evaluations(feature=feature, foldername=foldername) - def evaluations(self, feature=None, foldername="", **plot_kwargs): """ Plot all evaluations for a specific model/feature. @@ -142,6 +180,10 @@ def evaluations(self, feature=None, foldername="", **plot_kwargs): foldername : str, optional Name of folder where to save all plots. The folder is created if it does not exist. Default folder is named "featurename_evaluations". + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -183,9 +225,7 @@ def evaluations(self, feature=None, foldername="", **plot_kwargs): else: raise AttributeError("Dimension of evaluations is not valid: dim {}".format(dimension)) - - - def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): + def evaluations_0d(self, feature=None, foldername="", palette="colorblind", **plot_kwargs): """ Plot all 0D evaluations for a specific model/feature. @@ -231,17 +271,16 @@ def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): ylabel=self.data.get_labels(feature)[0], title="{}, evaluations".format(feature.replace("_", " ")), new_figure=True, - palette="husl", + palette=palette, **plot_kwargs) plt.tight_layout() - plt.savefig(os.path.join(save_folder, "evaluations" + self.figureformat)) + plt.savefig(os.path.join(save_folder, "evaluations" + self.figureformat), dpi=self.dpi) plt.close() reset_style() - - def evaluations_1d(self, feature=None, foldername="", **plot_kwargs): + def evaluations_1d(self, feature=None, foldername="", palette="colorblind", xscale='linear', yscale='linear', **plot_kwargs): """ Plot all 1D evaluations for a specific model/feature. 
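
The PlotUncertainty constructor and load/set_data methods above now accept uncertain_names for nicer parameter labels, plus font and output settings through **kwargs (fontsize, linewidth, titlesize, labelsize, dpi, max_legend_size), with dpi forwarded to every plt.savefig call. A hypothetical usage sketch; the file name and parameter names are placeholders, and the import path simply follows the file location in the patch.

    from uncertainpy.plotting.plot_uncertainty import PlotUncertainty

    plot = PlotUncertainty(
        filename="data/coffee_cup.h5",        # placeholder path to a result file
        folder="figures/",
        figureformat=".png",
        uncertain_names=["kappa", "T_env"],   # one label per uncertain parameter
        fontsize=14,                          # consumed by the new **kwargs handling
        labelsize=12,
        dpi=300,                              # forwarded as plt.savefig(..., dpi=self.dpi)
    )
    # The axis-scale keywords added to evaluations_1d:
    plot.evaluations_1d(feature=None, xscale="linear", yscale="log")
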
@@ -271,7 +310,7 @@ def evaluations_1d(self, feature=None, foldername="", **plot_kwargs): if feature is None: feature = self.data.model_name - if feature not in self.data or "evaluations" not in self.data[feature]: + if feature not in self.data or "evaluations" not in self.data[feature]: logger.warning("No model evaluations to plot.") return @@ -291,7 +330,6 @@ def evaluations_1d(self, feature=None, foldername="", **plot_kwargs): else: time = self.data[feature].time - padding = len(str(len(self.data[feature].evaluations) + 1)) for i, evaluation in enumerate(self.data[feature].evaluations): @@ -302,22 +340,22 @@ def evaluations_1d(self, feature=None, foldername="", **plot_kwargs): time = self.data[feature].time[i] ax = prettyPlot(time, evaluation, - xlabel=xlabel.capitalize(), - ylabel=ylabel.capitalize(), + xlabel=xlabel, + ylabel=ylabel, title="{}, evaluation {:d}".format(feature.replace("_", " "), i), new_figure=True, - palette="husl", + palette=palette, **plot_kwargs) ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) plt.tight_layout() plt.savefig(os.path.join(save_folder, - "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat)) + "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat), dpi=self.dpi) plt.close() reset_style() - - def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): """ Plot all 2D evaluations for a specific model/feature. @@ -356,7 +394,7 @@ def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): if self.data.ndim(feature) != 2: raise ValueError("{} is not a 2 dimensional feature.".format(feature)) - set_style("seaborn-dark") + set_style("classic") save_folder = os.path.join(self.folder, foldername, feature + "_evaluations") if not os.path.isdir(save_folder): @@ -385,24 +423,25 @@ def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): cbar = fig.colorbar(iax) cbar.ax.set_ylabel(zlabel) - ax.set_xlabel(xlabel.capitalize()) - ax.set_ylabel(ylabel.capitalize()) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) plt.tight_layout() plt.savefig(os.path.join(save_folder, - "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat)) + "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat), dpi=self.dpi) plt.close() reset_style() - - - def attribute_feature_1d(self, feature=None, attribute="mean", attribute_name="mean", hardcopy=True, show=False, + palette="colorblind", + xscale="linear", + yscale="linear", + title=None, **plot_kwargs): """ Plot a 1 dimensional attribute for a specific model/feature. @@ -421,6 +460,12 @@ def attribute_feature_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. 
@@ -458,16 +503,18 @@ def attribute_feature_1d(self, else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels - title = feature + ", " + attribute_name + if title is None: + title = feature + ", " + attribute_name ax = prettyPlot(time, self.data[feature][attribute], - title.replace("_", " "), xlabel.capitalize(), ylabel.capitalize(), + title.replace("_", " "), xlabel, ylabel, nr_colors=3, - palette="husl", + palette=palette, **plot_kwargs) + ax.set_xscale(xscale) + ax.set_yscale(yscale) ax.set_xlim([min(time), max(time)]) @@ -476,7 +523,7 @@ def attribute_feature_1d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - save_name + self.figureformat)) + save_name + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -485,8 +532,6 @@ def attribute_feature_1d(self, reset_style() - - def attribute_feature_2d(self, feature=None, attribute="mean", @@ -538,7 +583,6 @@ def attribute_feature_2d(self, if attribute not in ["mean", "variance"]: raise ValueError("{} is not a supported attribute".format(attribute)) - if attribute not in self.data[feature]: msg = " Unable to plot {attribute_name}. {attribute_name} of {feature} does not exist." logger.warning(msg.format(attribute_name=attribute, feature=feature)) @@ -547,22 +591,19 @@ def attribute_feature_2d(self, if self.data[feature].time is None or np.all(np.isnan(self.data[feature].time)): extent = None else: - extent=[self.data[feature].time[0], self.data[feature].time[-1], - 0, self.data[feature][attribute].shape[0]] - - + extent = [self.data[feature].time[0], self.data[feature].time[-1], + 0, self.data[feature][attribute].shape[0]] title = feature + ", " + attribute_name labels = self.data.get_labels(feature) xlabel, ylabel, zlabel = labels - set_style("seaborn-dark") + set_style("classic") fig = plt.figure() ax = fig.add_subplot(111) ax.set_title(title.replace("_", " ")) - iax = ax.imshow(self.data[feature][attribute], cmap="viridis", aspect="auto", extent=extent, **plot_kwargs) @@ -571,8 +612,8 @@ def attribute_feature_2d(self, # cbar.ax.set_title(zlabel) cbar.ax.set_ylabel(zlabel) - ax.set_xlabel(xlabel.capitalize(), fontsize=labelsize) - ax.set_ylabel(ylabel.capitalize(), fontsize=labelsize) + ax.set_xlabel(xlabel, fontsize=self.labelsize) + ax.set_ylabel(ylabel, fontsize=self.labelsize) save_name = feature + "_" + attribute_name @@ -580,7 +621,7 @@ def attribute_feature_2d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - save_name + self.figureformat)) + save_name + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -589,9 +630,7 @@ def attribute_feature_2d(self, reset_style() - - - def mean_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): + def mean_1d(self, feature, hardcopy=True, show=False, palette="colorblind", xscale='linear', yscale='linear', title=None, **plot_kwargs): """ Plot the mean for a specific 1 dimensional model/feature. @@ -603,6 +642,12 @@ def mean_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. 
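
mean_1d above (and variance_1d, which follows with the same signature) now forwards xscale, yscale, and title to attribute_feature_1d, so individual statistics can be drawn on log axes with a custom heading. A short usage sketch; the feature name is a placeholder and `plot` is assumed to be a PlotUncertainty instance with data loaded, as in the earlier constructor sketch.

    # `plot` is a PlotUncertainty instance with data already loaded.
    plot.mean_1d(feature="nr_spikes", xscale="linear", yscale="log",
                 title="Mean spike count")
    plot.variance_1d(feature="nr_spikes", yscale="log",
                     title="Variance of spike count")
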
@@ -618,11 +663,13 @@ def mean_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): attribute_name="mean", hardcopy=hardcopy, show=show, + xscale=xscale, + yscale=yscale, + title=title, color=0, **plot_kwargs) - - def variance_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): + def variance_1d(self, feature, hardcopy=True, show=False, palette="colorblind", xscale='linear', yscale='linear', title=None, **plot_kwargs): """ Plot the variance for a specific 1 dimensional model/feature. @@ -634,6 +681,12 @@ def variance_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -649,6 +702,9 @@ def variance_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): attribute_name="variance", hardcopy=hardcopy, show=show, + xscale=xscale, + yscale=yscale, + title=title, color=2, **plot_kwargs) @@ -681,7 +737,6 @@ def mean_2d(self, feature, hardcopy=True, show=False, **plot_kwargs): show=show, **plot_kwargs) - def variance_2d(self, feature, hardcopy=True, show=False, **plot_kwargs): """ Plot the variance for a specific 2 dimensional model/feature. @@ -711,12 +766,16 @@ def variance_2d(self, feature, hardcopy=True, show=False, **plot_kwargs): show=show, **plot_kwargs) - def mean_variance_1d(self, feature=None, new_figure=True, hardcopy=True, show=False, + xscale='linear', + yscale='linear', + palette="colorblind", + color_axes=True, + title=None, **plot_kwargs): """ Plot the mean and variance for a specific 1 dimensional model/feature. @@ -730,6 +789,12 @@ def mean_variance_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -753,7 +818,7 @@ def mean_variance_1d(self, if "mean" not in self.data[feature] or "variance" not in self.data[feature]: msg = "Mean and/or variance of {feature} does not exist. 
".format(feature=feature) \ - + "Unable to plot mean and variance" + + "Unable to plot mean and variance" logger.warning(msg) return @@ -762,57 +827,65 @@ def mean_variance_1d(self, else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels - - style="seaborn-white" - title = feature + ", mean and variance" + style = "classic" + if title is None: + title = feature + ", mean and variance" ax = prettyPlot(time, self.data[feature].mean, - title.replace("_", " "), xlabel.capitalize(), ylabel.capitalize() + ", mean", + title.replace("_", " "), xlabel, ylabel + ", mean", style=style, nr_colors=3, - palette="husl", + palette=palette, + labelfontsize=self.labelsize, + ffontsize=self.fontsize, **plot_kwargs) - + ax.set_yscale(yscale) + ax.set_xscale(xscale) colors = get_current_colormap() ax2 = ax.twinx() color = 0 color_2 = 2 - spines_color(ax2, edges={"top": "None", "bottom": "None", + axescolor = "black" + if color_axes: + spines_color(ax2, edges={"top": "None", "bottom": "None", "right": colors[color_2], "left": "None"}) + axescolor = colors[color_2] ax2.tick_params(axis="y", which="both", right=False, left=False, labelright=True, - color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) - ax2.set_ylabel(ylabel.capitalize() + r"$^2$, variance", color=colors[color_2], fontsize=labelsize) + color=axescolor, labelcolor=axescolor, labelsize=self.labelsize) + ax2.set_ylabel("(" + ylabel + r")$^2$, variance", color=colors[color_2], fontsize=self.labelsize) # ax2.set_ylim([min(self.data.variance[feature]), max(self.data.variance[feature])]) ax2.plot(time, self.data[feature].variance, - color=colors[color_2], linewidth=linewidth, antialiased=True) + color=colors[color_2], linewidth=self.linewidth, antialiased=True) - ax2.yaxis.offsetText.set_fontsize(fontsize) - ax2.yaxis.offsetText.set_color(colors[color_2]) + ax2.yaxis.offsetText.set_fontsize(self.fontsize) + ax2.yaxis.offsetText.set_color(axescolor) ax2.spines["right"].set_visible(True) - ax2.spines["right"].set_edgecolor(colors[color_2]) - - ax.tick_params(axis="y", color=colors[color], labelcolor=colors[color]) - ax.spines["left"].set_edgecolor(colors[color]) - ax.set_ylabel(ylabel + ", mean", color=colors[color], fontsize=labelsize) + ax2.spines["right"].set_edgecolor(axescolor) + + if color_axes: + axescolor = colors[color] + ax.tick_params(axis="y", color=axescolor, labelcolor=axescolor) + ax.spines["left"].set_edgecolor(axescolor) + ax.set_ylabel(ylabel + ", mean", color=axescolor, fontsize=self.labelsize) ax2.set_xlim([min(time), max(time)]) ax.set_xlim([min(time), max(time)]) - + ax2.set_yscale(yscale) + ax2.set_xscale(xscale) plt.tight_layout() if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_mean-variance" + self.figureformat)) + feature + "_mean-variance" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -824,12 +897,19 @@ def mean_variance_1d(self, # if not show or not hardcopy: # return ax, ax2 - - def prediction_interval_1d(self, feature=None, hardcopy=True, show=False, + title=None, + xscale='linear', + yscale='linear', + palette="colorblind", + xmin=None, + xmax=None, + ymin=None, + ymax=None, + legend_loc="best", **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. @@ -843,6 +923,12 @@ def prediction_interval_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. 
+ title: string, optional + Choose a customized title + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -871,39 +957,55 @@ def prediction_interval_1d(self, logger.warning(msg.format(feature=feature)) return - if self.data[feature].time is None or np.all(np.isnan(self.data[feature].time)): time = np.arange(0, len(self.data[feature].mean)) else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels - title = feature.replace("_", " ") + ", 90% prediction interval" + plot_kwargs["label"] = "Mean" + if title is None: + title = feature.replace("_", " ") + ", 90% prediction interval" ax = prettyPlot(time, self.data[feature].mean, title=title, - xlabel=xlabel.capitalize(), ylabel=ylabel.capitalize(), + xlabel=xlabel, ylabel=ylabel, + labelfontsize=self.labelsize, + ffontsize=self.fontsize, color=0, nr_colors=3, - palette="husl", + palette=palette, **plot_kwargs) colors = get_current_colormap() ax.fill_between(time, - self.data[feature].percentile_5, - self.data[feature].percentile_95, - alpha=0.5, color=colors[0], - linewidth=0) + self.data[feature].percentile_5, + self.data[feature].percentile_95, + alpha=0.5, color=colors[0], + linewidth=0, label="90% prediction interval") - ax.set_xlim([min(time), max(time)]) - plt.legend(["Mean", "90% prediction interval"], loc="best") + if xmin is None: + ax.set_xlim(min(time)) + else: + ax.set_xlim(xmin) + if xmax is None: + ax.set_xlim(right=max(time)) + else: + ax.set_xlim(right=xmax) + if ymin is not None: + ax.set_ylim(ymin) + if ymax is not None: + ax.set_ylim(top=ymax) + + ax.set_xscale(xscale) + ax.set_yscale(yscale) + plt.legend(["Mean", "90% prediction interval"], loc=legend_loc) plt.tight_layout() if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_prediction-interval" + self.figureformat)) + feature + "_prediction-interval" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -912,12 +1014,15 @@ def prediction_interval_1d(self, reset_style() - def sensitivity_1d(self, feature=None, sensitivity="first", hardcopy=True, show=False, + xscale='linear', + yscale='linear', + palette="colorblind", + title=None, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -936,6 +1041,12 @@ def sensitivity_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. 
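
prediction_interval_1d above gains explicit axis limits (xmin, xmax, ymin, ymax), a legend_loc, and scale options on top of the existing mean and 90% prediction interval plot. A hypothetical call, with the feature name as a placeholder and `plot` a loaded PlotUncertainty instance.

    plot.prediction_interval_1d(feature="membrane_potential",
                                xscale="linear", yscale="linear",
                                xmin=0.0, xmax=100.0,     # clip the time axis
                                ymin=-80.0, ymax=40.0,    # clip the value axis
                                legend_loc="upper right")
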
@@ -954,7 +1065,7 @@ def sensitivity_1d(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") @@ -979,22 +1090,28 @@ def sensitivity_1d(self, xlabel, ylabel = labels for i in range(len(self.data[feature][sensitivity])): + if title is None: + title = title_tmp + ", " + feature.replace("_", " ") + " - " + self.uncertain_names[i] ax = prettyPlot(time, self.data[feature][sensitivity][i], - title=title.capitalize() + ", " + feature.replace("_", " ") + " - " + self.data.uncertain_parameters[i], - xlabel=xlabel.capitalize(), - ylabel=title.capitalize(), + title=title, + xlabel=xlabel, + ylabel=title, color=i, - palette="husl", + palette=palette, + labelfontsize=self.labelsize, + ffontsize=self.fontsize, nr_colors=len(self.data.uncertain_parameters), **plot_kwargs) # plt.ylim([0, 1.05]) ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) plt.tight_layout() if hardcopy: plt.savefig(os.path.join(self.folder, feature + "_" + sensitivity + "_" - + self.data.uncertain_parameters[i] + self.figureformat)) + + self.data.uncertain_parameters[i] + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1003,13 +1120,15 @@ def sensitivity_1d(self, reset_style() - - def sensitivity_1d_grid(self, feature=None, sensitivity="first", hardcopy=True, show=False, + xscale="linear", + yscale="linear", + palette="colorblind", + title=None, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -1029,6 +1148,12 @@ def sensitivity_1d_grid(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. 
@@ -1047,8 +1172,7 @@ def sensitivity_1d_grid(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) - + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") @@ -1069,32 +1193,37 @@ def sensitivity_1d_grid(self, else: time = self.data[feature].time - parameter_names = self.data.uncertain_parameters + parameter_names = self.uncertain_names # get size of the grid in x and y directions nr_plots = len(parameter_names) grid_size = np.ceil(np.sqrt(nr_plots)) grid_x_size = int(grid_size) - grid_y_size = int(np.ceil(nr_plots/float(grid_x_size))) + grid_y_size = int(np.ceil(nr_plots / float(grid_x_size))) - set_style("seaborn-darkgrid") + set_style("classic") fig, axes = plt.subplots(nrows=grid_y_size, ncols=grid_x_size, squeeze=False, sharex="col", sharey="row") labels = self.data.get_labels(feature) xlabel, ylabel = labels + if title is None: + title = title_tmp + # Add a larger subplot to use to set a common xlabel and ylabel - set_style("seaborn-white") + set_style("classic") ax = fig.add_subplot(111, zorder=-10) spines_color(ax, edges={"top": "None", "bottom": "None", "right": "None", "left": "None"}) ax.tick_params(labelcolor="w", top=False, bottom=False, left=False, right=False) - ax.set_xlabel(xlabel.capitalize(), labelpad=8) - ax.set_ylabel(title.capitalize()) + ax.set_xlabel(xlabel, labelpad=8) + ax.set_ylabel(title) + ax.set_xscale(xscale) + ax.set_yscale(yscale) - for i in range(0, grid_x_size*grid_y_size): + for i in range(0, grid_x_size * grid_y_size): nx = i % grid_x_size - ny = int(np.floor(i/float(grid_x_size))) + ny = int(np.floor(i / float(grid_x_size))) ax = axes[ny][nx] @@ -1104,7 +1233,10 @@ def sensitivity_1d_grid(self, color=i, nr_colors=nr_plots, ax=ax, - palette="husl", + palette=palette, + labelfontsize=self.labelsize, + ffontsize=self.fontsize, + **plot_kwargs) # for tick in ax.get_xticklabels(): @@ -1113,19 +1245,18 @@ def sensitivity_1d_grid(self, ax.set_ylim([0, 1.05]) ax.set_xlim([min(time), max(time)]) # ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) - ax.tick_params(labelsize=10) + ax.tick_params(labelsize=self.labelsize) else: ax.set_axis_off() - title = title.capitalize() + ", " + feature.replace("_", " ") - plt.suptitle(title, fontsize=titlesize) + title = title + ", " + feature.replace("_", " ") + plt.suptitle(title, fontsize=self.titlesize) plt.tight_layout() plt.subplots_adjust(top=0.9) - if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_" + sensitivity + "_grid" + self.figureformat)) + feature + "_" + sensitivity + "_grid" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1134,13 +1265,16 @@ def sensitivity_1d_grid(self, reset_style() - - def sensitivity_1d_combined(self, feature=None, sensitivity="first", hardcopy=True, show=False, + xscale="linear", + yscale="linear", + title=None, + palette="colorblind", + sobol_limit=.0, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -1159,6 +1293,12 @@ def sensitivity_1d_combined(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. 
+ title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -1177,7 +1317,7 @@ def sensitivity_1d_combined(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") @@ -1198,34 +1338,49 @@ def sensitivity_1d_combined(self, else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels + # plot sensitivity + number_colors = len(self.data.uncertain_parameters) + + + color_idx = 0 for i in range(len(self.data[feature][sensitivity])): + if not np.any(self.data[feature][sensitivity][i] > sobol_limit): + color_idx += 1 + continue + + if title is None: + title = title_tmp + ", " + feature.replace("_", " ") prettyPlot(time, self.data[feature][sensitivity][i], - title=title.capitalize() + ", " + feature.replace("_", " "), - xlabel=xlabel.capitalize(), - ylabel=title.capitalize(), + title=title, + xlabel=xlabel, + ylabel=title_tmp, new_figure=False, - color=i, - palette="husl", - nr_colors=len(self.data.uncertain_parameters), - label=self.data.uncertain_parameters[i], + color=color_idx, + palette=palette, + nr_colors=number_colors, + label=self.uncertain_names[i], + labelfontsize=self.labelsize, + ffontsize=self.fontsize, **plot_kwargs) + color_idx += 1 plt.ylim([0, 1.05]) plt.xlim([min(time), max(time)]) + plt.xscale(xscale) + plt.yscale(yscale) if len(self.data[feature][sensitivity]) > 4: - plt.xlim([time[0], 1.3*time[-1]]) + plt.xlim([time[0], 1.3 * time[-1]]) plt.legend() plt.tight_layout() if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_" + sensitivity + self.figureformat)) + feature + "_" + sensitivity + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1234,8 +1389,7 @@ def sensitivity_1d_combined(self, reset_style() - - def features_1d(self, sensitivity="first"): + def features_1d(self, sensitivity="first", xscale='linear', yscale='linear', palette="colorblind", title=None): """ Plot all data for all 1 dimensional model/features. @@ -1251,6 +1405,12 @@ def features_1d(self, sensitivity="first"): order Sobol indices, while "sobol_total" and "total" are the total order Sobol indices. If None, no sensitivity is plotted. Default is "first". + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. 
+ title: string, optional + Choose a customized title Raises ------ @@ -1282,17 +1442,15 @@ def features_1d(self, sensitivity="first"): for feature in self.data: if self.data.ndim(feature) == 1: - self.mean_1d(feature=feature) - self.variance_1d(feature=feature) - self.mean_variance_1d(feature=feature) - self.prediction_interval_1d(feature=feature) + self.mean_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) + self.variance_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) + self.mean_variance_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) + self.prediction_interval_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) if sensitivity in self.data[feature]: - self.sensitivity_1d(feature=feature, sensitivity=sensitivity) - self.sensitivity_1d_combined(feature=feature, sensitivity=sensitivity) - self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity) - - + self.sensitivity_1d(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale, title=title) + self.sensitivity_1d_combined(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale, title=title) + self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale, title=title) def convert_sensitivity(self, sensitivity): """ @@ -1322,13 +1480,12 @@ def convert_sensitivity(self, sensitivity): full_text = "" if sensitivity == "sobol_first": - full_text = "first order Sobol indices" + full_text = "First order Sobol indices" elif sensitivity == "sobol_total": - full_text = "total order Sobol indices" + full_text = "Total order Sobol indices" return sensitivity, full_text - def features_2d(self): """ Plot all implemented plots for all 2 dimensional model/features. @@ -1347,15 +1504,17 @@ def features_2d(self): self.mean_2d(feature=feature) self.variance_2d(feature=feature) - # TODO not finished, missing correct label placement # TODO test that plotting with no sensitivity works def feature_0d(self, feature, sensitivity="first", + error="variance", hardcopy=True, + palette="colorblind", show=False, - max_legend_size=5): + max_legend_size=5, + title=None): """ Plot all attributes (mean, variance, p_05, p_95 and sensitivity of it exists) for a 0 dimensional model/feature. @@ -1370,6 +1529,8 @@ def feature_0d(self, order Sobol indices, while "sobol_total" and "total" are the total order Sobol indices. If None, no sensitivity is plotted. Default is "first". + error: {"variance", "stddev"}, optional + Plot either variance or standard deviation (square root of variance). hardcopy : bool, optional If the plot should be saved to file. Default is True. show : bool, optional @@ -1392,7 +1553,6 @@ def feature_0d(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total", None]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total or None, not {}".format(sensitivity)) - sensitivity, label = self.convert_sensitivity(sensitivity) if self.data is None: @@ -1404,90 +1564,94 @@ def feature_0d(self, for data_type in ["mean", "variance", "percentile_5", "percentile_95"]: if data_type not in self.data[feature]: msg = "{data_type} for {feature} does not exist. Unable to plot." 
- logger.warning(msg.format(data_type=data_type,feature=feature)) + logger.warning(msg.format(data_type=data_type, feature=feature)) return + if self.max_legend_size is not None and isinstance(self.max_legend_size, int): + max_legend_size = self.max_legend_size + + if len(self.data.uncertain_parameters) > max_legend_size: legend_size = max_legend_size else: legend_size = len(self.data.uncertain_parameters) - legend_width = np.ceil(len(self.data.uncertain_parameters)/float(max_legend_size)) + legend_width = np.ceil(len(self.data.uncertain_parameters) / float(max_legend_size)) width = 0.2 distance = 0.5 - xlabels = ["Mean", "Variance", "$P_5$", "$P_{95}$"] - xticks = [0, width, distance + width, distance + 2*width] + error_label = "Variance" + if error == "stddev": + error_label = "SD" + xlabels = ["Mean", error_label, "$P_5$", "$P_{95}$"] + xticks = [0, width, distance + width, distance + 2 * width] - values = [self.data[feature].mean, self.data[feature].variance, + error_value = self.data[feature].variance + if error == "stddev": + error_value = np.sqrt(error_value) + values = [self.data[feature].mean, error_value, self.data[feature].percentile_5, self.data[feature].percentile_95] ylabel = self.data.get_labels(feature)[0] ax = prettyBar(values, index=xticks, + labelfontsize=self.labelsize, xlabels=xlabels, - ylabel=ylabel.capitalize(), + ylabel=ylabel, palette="Paired", - style="seaborn-white") + style="classic") if sensitivity in self.data[feature]: - pos = 2*distance + 2*width + pos = 2 * distance + 2 * width ax2 = ax.twinx() spines_color(ax2, edges={"top": "None", "bottom": "None", - "right": axis_grey, "left": "None"}) + "left": "None"}) ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, - color=axis_grey, labelcolor="black", labelsize=labelsize) - ax2.set_ylabel(label.capitalize(), fontsize=labelsize) + labelcolor="black", labelsize=self.labelsize) + ax2.set_ylabel(label, fontsize=self.labelsize) ax2.set_ylim([0, 1.05]) - ax2.spines["right"].set_visible(True) - ax2.spines["right"].set_edgecolor(axis_grey) - i = 0 legend_bars = [] - colors = get_colormap(palette="husl", nr_colors=len(self.data.uncertain_parameters)) + colors = get_colormap(palette=palette, nr_colors=len(self.data.uncertain_parameters)) for parameter in self.data.uncertain_parameters: - - l = ax2.bar(pos, self.data[feature][sensitivity][i], width=width, - align="center", color=colors[i], linewidth=0) - - legend_bars.append(l) + ll = ax2.bar(pos, self.data[feature][sensitivity][i], width=width, + align="center", color=colors[i], linewidth=0) + legend_bars.append(ll) i += 1 pos += width - xticks.append(pos - (i/2. + 0.5)*width) - xlabels.append(sensitivity.split("_")[0] + " " + sensitivity.split("_")[1]) + xticks.append(pos - (i / 2. 
+ 0.5) * width) + xlabels.append(sensitivity.split("_")[0].capitalize() + " " + sensitivity.split("_")[1]) - location = (0.5, 1.01 + legend_width*0.095) + location = (0.5, 1.01 + legend_width * 0.095) plt.legend(legend_bars, - self.data.uncertain_parameters, + self.uncertain_names, loc="upper center", bbox_to_anchor=location, - ncol=legend_size) - - # lgd.get_frame().set_edgecolor(axis_grey) + ncol=legend_size, + fontsize=self.fontsize) fig = plt.gcf() - fig.subplots_adjust(top=(0.91 - legend_width*0.053)) - + fig.subplots_adjust(top=(0.91 - legend_width * 0.053)) ax.set_xticks(xticks) - ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) + ax.set_xticklabels(xlabels, fontsize=self.labelsize, rotation=0) if len(self.data.uncertain_parameters) > 3: for tick in ax.get_xticklabels()[:2]: tick.set_rotation(-25) - - plt.suptitle(feature.replace("_", " "), fontsize=titlesize) + if title is not None: + plt.suptitle(feature.replace("_", " "), fontsize=self.titlesize) if sensitivity is None or sensitivity not in self.data[feature]: plt.subplots_adjust(top=0.93) @@ -1497,8 +1661,10 @@ def feature_0d(self, else: save_name = feature + "_" + sensitivity + self.figureformat + plt.tight_layout() + if hardcopy: - plt.savefig(os.path.join(self.folder, save_name)) + plt.savefig(os.path.join(self.folder, save_name), dpi=self.dpi) if show: plt.show() @@ -1509,11 +1675,12 @@ def feature_0d(self, # return ax - def average_sensitivity(self, - feature, + feature=None, sensitivity="first", hardcopy=True, + title=None, + palette="colorblind", show=False): """ Plot the average of the sensitivity for a specific model/feature. @@ -1529,6 +1696,8 @@ def average_sensitivity(self, order Sobol indices. Default is "first". hardcopy : bool, optional If the plot should be saved to file. Default is True. + title: string, optional + Choose a customized title show : bool, optional If the plot should be shown on screen. Default is False. 
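
The feature_0d changes above (together with features_0d and plot_condensed later in the patch) allow the bar chart to show the standard deviation instead of the variance; the only transformation applied is a square root of the stored variance. A brief usage sketch with a placeholder feature name, assuming `plot` is a loaded PlotUncertainty instance.

    # Bar chart with mean, standard deviation (instead of variance), and percentiles.
    plot.feature_0d("average_AP_width", sensitivity="first", error="stddev")

    # Internally the only change when error == "stddev" is:
    #     error_value = np.sqrt(self.data[feature].variance)
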
@@ -1547,11 +1716,14 @@ def average_sensitivity(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") + if feature is None: + feature = self.data.model_name + if feature not in self.data: raise ValueError("{} is not a feature".format(feature)) @@ -1562,17 +1734,21 @@ def average_sensitivity(self, width = 0.2 - index = np.arange(1, len(self.data.uncertain_parameters)+1)*width + index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width + if title is None: + title = "Average of " + title_tmp[0].lower() + title_tmp[1:] + ", " + feature.replace("_", " ") + elif title is False: + title = "" prettyBar(self.data[feature][sensitivity + "_average"], - title="Average of " + title + ", " + feature.replace("_", " "), - xlabels=self.data.uncertain_parameters, - ylabel="Average of " + title, + title=title, + xlabels=self.uncertain_names, + ylabel="Average of " + title_tmp[0].lower() + title_tmp[1:], nr_colors=len(self.data.uncertain_parameters), - palette="husl", + labelfontsize=self.labelsize, + palette=palette, index=index, - style="seaborn-darkgrid") - + style="classic") plt.ylim([0, 1]) @@ -1581,7 +1757,7 @@ def average_sensitivity(self, plt.tight_layout() if hardcopy: - plt.savefig(os.path.join(self.folder, save_name)) + plt.savefig(os.path.join(self.folder, save_name), dpi=self.dpi) if show: plt.show() @@ -1590,10 +1766,11 @@ def average_sensitivity(self, reset_style() - def average_sensitivity_all(self, sensitivity="first", hardcopy=True, + title=None, + palette="colorblind", show=False): """ Plot the average of the sensitivity for all model/features. @@ -1606,6 +1783,8 @@ def average_sensitivity_all(self, order Sobol indices. Default is "first". hardcopy : bool, optional If the plot should be saved to file. Default is True. + title: string, optional + Choose a customized title show : bool, optional If the plot should be shown on screen. Default is False. @@ -1623,18 +1802,17 @@ def average_sensitivity_all(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, _ = self.convert_sensitivity(sensitivity) for feature in self.data: if sensitivity + "_average" in self.data[feature]: self.average_sensitivity(feature=feature, sensitivity=sensitivity, hardcopy=hardcopy, + title=title, show=show) - - def features_0d(self, sensitivity="first", hardcopy=True, show=False): + def features_0d(self, sensitivity="first", error="variance", hardcopy=True, show=False): """ Plot the results for all 0 dimensional model/features. @@ -1644,6 +1822,8 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): Which Sobol indices to plot. "sobol_first" and "first" is the first order Sobol indices, while "sobol_total" and "total" are the total order Sobol indices. Default is "first". + error: {"variance", "stddev"}, optional + Plot either variance or standard deviation (square root of variance). hardcopy : bool, optional If the plot should be saved to file. Default is True. 
show : bool, optional @@ -1662,9 +1842,7 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): for feature in self.data: if self.data.ndim(feature) == 0: - self.feature_0d(feature, sensitivity=sensitivity, hardcopy=hardcopy, show=show) - - + self.feature_0d(feature, sensitivity=sensitivity, error=error, hardcopy=hardcopy, show=show) # # TODO Not Tested # def plot_folder(self, data_dir): @@ -1675,7 +1853,6 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): # self.plot_all() - # def plot_allNoSensitivity(self, sensitivity="first"): # if self.data is None: # raise ValueError("Datafile must be loaded.") @@ -1684,7 +1861,6 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): # self.features_1d(sensitivity=sensitivity) # self.features_0d(sensitivity=sensitivity) - def plot_all(self, sensitivity="first"): """ Plot the results for all model/features, with the chosen sensitivity. @@ -1716,8 +1892,6 @@ def plot_all(self, sensitivity="first"): self.average_sensitivity_all(sensitivity=sensitivity) self.average_sensitivity_grid(sensitivity=sensitivity) - - # TODO find a more descriptive name def plot_all_sensitivities(self): """ @@ -1744,14 +1918,15 @@ def plot_all_sensitivities(self): self.average_sensitivity_all(sensitivity="total") self.average_sensitivity_grid(sensitivity="total") - - def plot_condensed(self, sensitivity="first"): + def plot_condensed(self, error="variance", sensitivity="first"): """ Plot the subset of data that shows all information in the most concise way, with the chosen sensitivity. Parameters ---------- + error: {"variance", "stddev"}, optional + Plot either variance or standard deviation (square root of variance). sensitivity : {"sobol_first", "first", "sobol_total", "total"}, optional Which Sobol indices to plot. "sobol_first" and "first" is the first order Sobol indices, while "sobol_total" and "total" are the total @@ -1773,21 +1948,20 @@ def plot_condensed(self, sensitivity="first"): for feature in self.data: if self.data.ndim(feature) == 1: + if error != "variance": + raise NotImplementedError("For 1D features, the standard deviation plot has not yet been implemented.") self.mean_variance_1d(feature=feature) self.prediction_interval_1d(feature=feature) if sensitivity in self.data[feature]: self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity) - self.features_0d(sensitivity=sensitivity) + self.features_0d(sensitivity=sensitivity, error=error) self.features_2d() if sensitivity is not None: self.average_sensitivity_grid(sensitivity=sensitivity) - - - def plot(self, condensed=True, sensitivity="first"): """ Plot the subset of data that shows all information in the most concise @@ -1815,17 +1989,18 @@ def plot(self, condensed=True, sensitivity="first"): if condensed: self.plot_condensed(sensitivity=sensitivity) else: - if sensitivity is "all": + if sensitivity == "all": self.plot_all_sensitivities() else: self.plot_all(sensitivity) - def average_sensitivity_grid(self, - sensitivity="first", - hardcopy=True, - show=False, - **plot_kwargs): + sensitivity="first", + hardcopy=True, + title=None, + show=False, + palette="colorblind", + **plot_kwargs): """ Plot the average of the sensitivity for all model/features in their own plots in the same figure. 
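
Both sensitivity_1d_grid (earlier in the patch) and average_sensitivity_grid (whose body follows) lay out one subplot per parameter or feature on a near-square grid: the number of columns is the ceiling of the square root of the number of plots, and the number of rows follows from that, with unused cells switched off via set_axis_off. A standalone sketch of that layout computation, mirroring the code in the patch.

    import numpy as np

    def grid_shape(nr_plots):
        """Near-square subplot grid used by the *_grid plotting methods."""
        grid_x_size = int(np.ceil(np.sqrt(nr_plots)))               # columns
        grid_y_size = int(np.ceil(nr_plots / float(grid_x_size)))   # rows
        return grid_y_size, grid_x_size

    print(grid_shape(5))   # (2, 3): five plots on a 2 x 3 grid, last cell left empty
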
@@ -1859,7 +2034,7 @@ def average_sensitivity_grid(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) no_sensitivity = True for feature in self.data: @@ -1875,14 +2050,13 @@ def average_sensitivity_grid(self, nr_plots = len(self.data) grid_size = np.ceil(np.sqrt(nr_plots)) grid_x_size = int(grid_size) - grid_y_size = int(np.ceil(nr_plots/float(grid_x_size))) + grid_y_size = int(np.ceil(nr_plots / float(grid_x_size))) # plt.close("all") - set_style("seaborn-dark") + set_style("classic") fig, axes = plt.subplots(nrows=grid_y_size, ncols=grid_x_size, squeeze=False, sharex="col", sharey="row") - set_style("seaborn-white") - + set_style("classic") # Add a larger subplot to use to set a common xlabel and ylabel @@ -1891,19 +2065,18 @@ def average_sensitivity_grid(self, "right": "None", "left": "None"}) ax.tick_params(labelcolor="w", top=False, bottom=False, left=False, right=False) ax.set_xlabel("Parameters") - ax.set_ylabel("Average of " + title) + ax.set_ylabel("Average of " + title_tmp) width = 0.2 - index = np.arange(1, len(self.data.uncertain_parameters)+1)*width + index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width features = list(self.data.keys()) - for i in range(0, grid_x_size*grid_y_size): + for i in range(0, grid_x_size * grid_y_size): nx = i % grid_x_size - ny = int(np.floor(i/float(grid_x_size))) + ny = int(np.floor(i / float(grid_x_size))) ax = axes[ny][nx] - if i < nr_plots: if sensitivity + "_average" not in self.data[features[i]]: msg = " Unable to plot {sensitivity}_average_grid. {sensitivity}_average of {feature} does not exist." @@ -1914,32 +2087,32 @@ def average_sensitivity_grid(self, prettyBar(self.data[features[i]][sensitivity + "_average"], title=features[i].replace("_", " "), - xlabels=self.data.uncertain_parameters, + xlabels=self.uncertain_names, nr_colors=len(self.data.uncertain_parameters), + labelfontsize=self.labelsize, index=index, - palette="husl", + palette=palette, ax=ax, **plot_kwargs) - for tick in ax.get_xticklabels(): tick.set_rotation(-30) ax.set_ylim([0, 1.05]) # ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) - ax.tick_params(labelsize=fontsize) + ax.tick_params(labelsize=self.fontsize) else: ax.set_axis_off() - title = "Average of " + title - plt.suptitle(title, fontsize=titlesize) + if title is None: + title = "Average of " + title_tmp + plt.suptitle(title, fontsize=self.titlesize) plt.tight_layout() plt.subplots_adjust(top=0.88) - if hardcopy: plt.savefig(os.path.join(self.folder, - sensitivity + "_average_grid" + self.figureformat)) + sensitivity + "_average_grid" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1948,6 +2121,188 @@ def average_sensitivity_grid(self, reset_style() + def prediction_interval_sensitivity_1d(self, + feature=None, + hardcopy=True, + show=False, + sensitivity="first", + title=None, + xscale='linear', + yscale='linear', + use_markers=False, + legend_locs=None, + legend_cols=1, + sobol_limit=.0, + palette="colorblind", + color_axes=True, + **plot_kwargs): + """ + Plot the prediction interval for a specific 1 dimensional model/feature. + + Parameters + ---------- + feature : {None, str}, optional + The name of the model/feature. If None, the name of the model is + used. Default is None. 
+ hardcopy : bool, optional + If the plot should be saved to file. Default is True. + show : bool, optional + If the plot should be shown on screen. Default is False. + title: string, optional + Choose a customized title + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. + use_markers: bool, optional + Use markers for the sensitivity data. + **plot_kwargs, optional + Matplotlib plotting arguments. + + Raises + ------ + ValueError + If a Datafile is not loaded. + ValueError + If the model/feature is not 1 dimensional. + """ + logger = get_logger(self) + + if self.data is None: + raise ValueError("Datafile must be loaded.") + + if feature is None: + feature = self.data.model_name + + if self.data.ndim(feature) != 1: + raise ValueError("{} is not a 1D feature".format(feature)) + + if "mean" not in self.data[feature] \ + or "percentile_5" not in self.data[feature] \ + or "percentile_95" not in self.data[feature]: + msg = "E, percentile_5 and/or percentile_95 of {feature} does not exist. Unable to plot prediction interval" + logger.warning(msg.format(feature=feature)) + return + + if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: + raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) + + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) + + if sensitivity not in self.data[feature]: + msg = "{sensitivity} of {feature} does not exist. Unable to plot {sensitivity}" + logger.warning(msg.format(sensitivity=sensitivity, feature=feature)) + return + + if self.data[feature].time is None or np.all(np.isnan(self.data[feature].time)): + time = np.arange(0, len(self.data[feature].mean)) + else: + time = self.data[feature].time + + labels = self.data.get_labels(feature) + xlabel, ylabel = labels + + # plot predicition interval + if title is None: + title = feature.replace("_", " ") + ", 90% prediction interval and sensitivities" + ax = prettyPlot(time, self.data[feature].mean, title=title, + xlabel=xlabel, ylabel=ylabel, + color=0, + nr_colors=3, + palette=palette, + labelfontsize=self.labelsize, + ffontsize=self.fontsize, + **plot_kwargs) + + colors = get_current_colormap() + ax.fill_between(time, + self.data[feature].percentile_5, + self.data[feature].percentile_95, + alpha=0.5, color=colors[0], + linewidth=0) + + ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) + + # plot sensitivity + number_colors = len(self.data[feature][sensitivity]) + number_colors += 2 + ax2 = ax.twinx() + colors = sns.color_palette(palette, n_colors=number_colors) + color = 0 + color_2 = 2 + + axescolor = "black" + if color_axes: + spines_color(ax2, edges={"top": "None", "bottom": "None", + "right": colors[color_2], "left": "None"}) + axescolor = colors[color_2] + + ax2.grid(False) + ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, + color=axescolor, labelcolor=axescolor, labelsize=self.labelsize) + ax2.set_ylabel(title_tmp, color=axescolor, fontsize=self.labelsize) + + # use markers for secondary axis + marker_list = itertools.cycle(('+', 'o', '*', 'v', '<', '>', 's', 'x')) + + i = 0 + color_idx = 0 + for sens_data in self.data[feature][sensitivity]: + if not np.any(sens_data > sobol_limit): + marker = next(marker_list) + i += 1 + color_idx += 1 + continue + if use_markers is False: + marker = None + else: + marker = next(marker_list) + + ax2.plot(time, sens_data, color=colors[color_idx + 
2], + linewidth=self.linewidth, antialiased=True, label=self.uncertain_names[i], + marker=marker, markeredgecolor=colors[color_idx + 2], **plot_kwargs) + i += 1 + color_idx += 1 + + if legend_locs is not None: + assert len(legend_locs) == 2, "you must provide two legend locations" + loc1 = legend_locs[0] + loc2 = legend_locs[1] + else: + loc1 = "best" + loc2 = "lower right" + + ax2.legend(loc=loc2, ncol=legend_cols) + ax2.yaxis.offsetText.set_fontsize(self.fontsize) + ax2.yaxis.offsetText.set_color(axescolor) + + ax2.spines["right"].set_visible(True) + ax2.spines["right"].set_edgecolor(axescolor) + if color_axes: + axescolor = colors[color] + ax.tick_params(axis="y", color=axescolor, labelcolor=axescolor) + ax.spines["left"].set_edgecolor(axescolor) + ax.set_ylabel(ylabel, color=axescolor, fontsize=self.labelsize) + + ax2.set_xlim([min(time), max(time)]) + ax.set_xlim([min(time), max(time)]) + ax2.set_ylim(0.0, 1.05) + + ax.legend(["Mean", "90% prediction interval"], loc=loc1) + plt.tight_layout() + + if hardcopy: + plt.savefig(os.path.join(self.folder, + feature + "_prediction-interval_sensitivity_" + sensitivity + self.figureformat), dpi=self.dpi) + + if show: + plt.show() + else: + plt.close() + + reset_style() # if __name__ == "__main__": # parser = argparse.ArgumentParser(description="Plot data") diff --git a/src/uncertainpy/plotting/prettyplot/__init__.py b/src/uncertainpy/plotting/prettyplot/__init__.py index 1f782abc..1691c0df 100644 --- a/src/uncertainpy/plotting/prettyplot/__init__.py +++ b/src/uncertainpy/plotting/prettyplot/__init__.py @@ -24,7 +24,6 @@ from .prettyplot import prettyPlot from .prettyplot import prettyBar -from .prettyplot import axis_grey from .prettyplot import titlesize from .prettyplot import fontsize from .prettyplot import labelsize @@ -33,4 +32,4 @@ from .prettyplot import ticklabelsize from .prettyplot import linewidth from .prettyplot import markersize -from .prettyplot import markeredgewidth \ No newline at end of file +from .prettyplot import markeredgewidth diff --git a/src/uncertainpy/plotting/prettyplot/prettyplot.py b/src/uncertainpy/plotting/prettyplot/prettyplot.py index beff7c81..a8b459c4 100644 --- a/src/uncertainpy/plotting/prettyplot/prettyplot.py +++ b/src/uncertainpy/plotting/prettyplot/prettyplot.py @@ -1,21 +1,23 @@ from __future__ import absolute_import, division, print_function, unicode_literals import matplotlib.pyplot as plt +import matplotlib as mpl import numpy as np import seaborn as sns -axis_grey = (0.6, 0.6, 0.6) ticksize = 5 -markersize = 8 -markeredgewidth = 1.4 +markersize = 6 +markeredgewidth = 1. figure_width = 7.08 labelsize = 10 titlesize = 12 -fontsize = 8 +fontsize = 10 ticklabelsize = 8 -linewidth = 1.4 -figsize = (figure_width, figure_width*0.75) +linewidth = 1.5 +# figsize = (figure_width, figure_width*0.75) +figsize = [6.85039, 5.1375] + def set_figuresize(): """ @@ -33,9 +35,7 @@ def set_legendstyle(): Set legend options. 
""" params = { - "legend.frameon": True, "legend.numpoints": 1, - "legend.scatterpoints": 1, "legend.fontsize": fontsize, "legend.handlelength": 2.2, "legend.borderpad": 0.5, @@ -45,11 +45,11 @@ def set_legendstyle(): plt.rcParams.update(params) - def set_font(): """Set font options.""" params = {"text.antialiased": True, - "font.family": "serif", + "font.family": "sans-serif", + "font.sans-serif": "Arial", "font.weight": "normal", } @@ -63,8 +63,8 @@ def set_latex_font(): params = {"text.usetex": True, "text.latex.preamble": "\\usepackage{amsmath}, \\usepackage{amssymb}", "text.antialiased": True, - # "font.family": "lmodern", - # "font.weight": "normal" + # "font.family": "lmodern", + # "font.weight": "normal" } plt.rcParams.update(params) @@ -76,7 +76,7 @@ def set_linestyle(): params = {"lines.linewidth": linewidth, "lines.linestyle": "solid", - "lines.marker": None, + "lines.marker": '.', "lines.antialiased": True, "lines.markersize": markersize, "lines.markeredgewidth": markeredgewidth, @@ -91,8 +91,8 @@ def set_tickstyle(): Set tick style options """ - params = {"xtick.color": axis_grey, - "ytick.color": axis_grey, + params = {'xtick.direction': 'out', + 'ytick.direction': 'out', "xtick.major.size": ticksize, "ytick.major.size": ticksize, "xtick.labelsize": ticklabelsize, @@ -109,12 +109,12 @@ def set_axestyle(): params = {"axes.titlesize": titlesize, "axes.labelsize": labelsize, - "axes.edgecolor": axis_grey, "axes.labelcolor": "black", "axes.linewidth": 1, "axes.spines.right": False, "axes.spines.top": False, - "axes.unicode_minus": True + "axes.unicode_minus": True, + "axes.formatter.useoffset": False } plt.rcParams.update(params) @@ -152,8 +152,8 @@ def set_grid(ax, bgcolor="#EAEAF2", linecolor="w", linestyle="-", linewidth=1.3) zorder=-10) -def spines_color(ax, edges={"top": "None", "bottom": axis_grey, - "right": "None", "left": axis_grey}): +def spines_color(ax, edges={"top": "None", + "right": "None"}): """ Set spines color @@ -181,11 +181,11 @@ def remove_ticks(ax): axis object where the ticks are removed """ ax.tick_params(axis="x", which="both", bottom=True, top=False, - labelbottom=True, color=axis_grey, labelcolor="black", + labelbottom=True, labelcolor="black", labelsize=labelsize) ax.tick_params(axis="y", which="both", right=False, left=True, - labelleft=True, color=axis_grey, labelcolor="black", + labelleft=True, labelcolor="black", labelsize=labelsize) @@ -286,7 +286,7 @@ def set_title(title, ax=None, **kwargs): ax.set_title(title, fontsize=titlesize, **kwargs) -def set_xlabel(xlabel, ax=None, color="black", **kwargs): +def set_xlabel(xlabel, ax=None, color="black", lblsize=labelsize, **kwargs): """ Set the x label @@ -308,12 +308,12 @@ def set_xlabel(xlabel, ax=None, color="black", **kwargs): Matplotlib xlabel() and set_xlabel() arguments """ if ax is None: - plt.xlabel(xlabel, fontsize=labelsize, color=color, **kwargs) + plt.xlabel(xlabel, fontsize=lblsize, color=color, **kwargs) else: - ax.set_xlabel(xlabel, fontsize=labelsize, color=color, **kwargs) + ax.set_xlabel(xlabel, fontsize=lblsize, color=color, **kwargs) -def set_ylabel(ylabel, ax=None, color="black", **kwargs): +def set_ylabel(ylabel, ax=None, color="black", lblsize=labelsize, **kwargs): """ Set the y label @@ -335,9 +335,9 @@ def set_ylabel(ylabel, ax=None, color="black", **kwargs): Matplotlib ylabel() and set_ylabel() arguments """ if ax is None: - plt.ylabel(ylabel, fontsize=labelsize, color=color, **kwargs) + plt.ylabel(ylabel, fontsize=lblsize, color=color, **kwargs) else: - ax.set_ylabel(ylabel, 
fontsize=labelsize, color=color, **kwargs) + ax.set_ylabel(ylabel, fontsize=lblsize, color=color, **kwargs) def set_style(style="seaborn-darkgrid", nr_colors=6, palette="hls", custom=True): @@ -445,8 +445,8 @@ def set_legend(labels, ax=None): legend = plt.legend(labels) - frame = legend.get_frame() - frame.set_facecolor(color) + #frame = legend.get_frame() + #frame.set_facecolor(color) def prettyPlot(x=[], y=None, @@ -454,7 +454,7 @@ def prettyPlot(x=[], y=None, xlabel="", ylabel="", color=None, - style="seaborn-darkgrid", + style="classic", custom_style=True, palette="hls", nr_colors=6, @@ -464,6 +464,8 @@ def prettyPlot(x=[], y=None, yerr=None, xerr=None, ecolor=None, + labelfontsize=labelsize, + ffontsize=fontsize, capsize=5, capthick=2, **kwargs): @@ -563,6 +565,13 @@ def prettyPlot(x=[], y=None, ---------- ax : matplotlib.axis object """ + global labelsize + global fontsize + if labelfontsize != labelsize: + labelsize = labelfontsize + if ffontsize != fontsize: + fontsize = ffontsize + set_style(style, nr_colors=nr_colors, palette=palette, custom=custom_style) if ax is None: @@ -583,8 +592,8 @@ def prettyPlot(x=[], y=None, remove_ticks(ax) set_title(title, ax) - set_xlabel(xlabel, ax) - set_ylabel(ylabel, ax) + set_xlabel(xlabel, ax, lblsize=labelfontsize) + set_ylabel(ylabel, ax, lblsize=labelfontsize) if y is None: y = x @@ -606,6 +615,7 @@ def prettyPlot(x=[], y=None, ecolor = ecolors[ecolor] else: ecolor = color + print("E-Color {} with type {}".format(ecolor, type(ecolor))) ax.errorbar(x, y, color=color, @@ -620,7 +630,7 @@ def prettyPlot(x=[], y=None, if custom_style: - ax.yaxis.offsetText.set_fontsize(labelsize) + ax.yaxis.offsetText.set_fontsize(labelfontsize) ax.yaxis.offsetText.set_color("black") # ax.ticklabel_format(useOffset=False) @@ -646,12 +656,13 @@ def prettyBar(x, error=None, linewidth=0, ax=None, new_figure=True, - style="seaborn-dark", + style="seaborn-v0_8-darkgrid", palette="hls", nr_colors=6, align="center", - error_kw={"ecolor": axis_grey, - "lw": 2, + labelfontsize=labelsize, + ffontsize=fontsize, + error_kw={"lw": 2, "capsize": 10, "capthick": 2}, **kwargs): @@ -736,6 +747,12 @@ def prettyBar(x, error=None, ax : matplotlib ax Object """ + global labelsize + if labelfontsize != labelsize: + labelsize = labelfontsize + global fontsize + if ffontsize != fontsize: + fontsize = ffontsize set_style(style, nr_colors=nr_colors, palette=palette, custom=custom_style) if ax is None: @@ -760,7 +777,7 @@ def prettyBar(x, error=None, if error_kw is None: - error_kw = dict(ecolor=axis_grey, lw=2, capsize=10, capthick=2) + error_kw = dict(lw=2, capsize=10, capthick=2) if color is None: colors = sns.color_palette() @@ -769,7 +786,7 @@ def prettyBar(x, error=None, ax.bar(index, x, yerr=error, color=colors, width=width, align=align, linewidth=linewidth, error_kw=error_kw, - edgecolor=axis_grey, **kwargs) + **kwargs) ax.set_xticks(xticks) ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) @@ -778,7 +795,7 @@ def prettyBar(x, error=None, # ax.set_ylim([min(y), max(y)]) set_title(title, ax) - set_ylabel(ylabel, ax) + set_ylabel(ylabel, ax, lblsize=labelsize) return ax diff --git a/src/uncertainpy/uncertainty.py b/src/uncertainpy/uncertainty.py index 58488d2a..23d7edc8 100644 --- a/src/uncertainpy/uncertainty.py +++ b/src/uncertainpy/uncertainty.py @@ -226,6 +226,7 @@ def quantify(self, figure_folder="figures", figureformat=".png", save=True, + save_samples=False, data_folder="data", filename=None, **custom_kwargs): @@ -407,6 +408,7 @@ def quantify(self, 
figure_folder=figure_folder, figureformat=figureformat, save=save, + save_samples=save_samples, data_folder=data_folder, filename=filename, **custom_kwargs) @@ -425,6 +427,7 @@ def quantify(self, figure_folder=figure_folder, figureformat=figureformat, save=save, + save_samples=save_samples, data_folder=data_folder, filename=filename, **custom_kwargs) @@ -438,6 +441,7 @@ def quantify(self, figureformat=figureformat, save=save, data_folder=data_folder, + save_samples=save_samples, filename=filename, seed=seed) @@ -451,6 +455,7 @@ def quantify(self, save=save, data_folder=data_folder, filename=filename, + save_samples=save_samples, seed=seed) @@ -565,6 +570,7 @@ def polynomial_chaos(self, figure_folder="figures", figureformat=".png", save=True, + save_samples=False, data_folder="data", filename=None, **custom_kwargs): @@ -708,6 +714,7 @@ def polynomial_chaos(self, quadrature_order=quadrature_order, nr_pc_mc_samples=nr_pc_mc_samples, allow_incomplete=allow_incomplete, + save_samples=save_samples, seed=seed, **custom_kwargs ) @@ -736,6 +743,7 @@ def monte_carlo(self, figureformat=".png", save=True, data_folder="data", + save_samples=False, filename=None): """ Perform an uncertainty quantification using the quasi-Monte Carlo method. @@ -832,6 +840,7 @@ def monte_carlo(self, self.data = self.uncertainty_calculations.monte_carlo(uncertain_parameters=uncertain_parameters, nr_samples=nr_samples, + save_samples=save_samples, seed=seed) self.data.backend = self.backend @@ -863,6 +872,7 @@ def polynomial_chaos_single(self, figure_folder="figures", figureformat=".png", save=True, + save_samples=False, data_folder="data", filename=None): """ @@ -1016,8 +1026,8 @@ def polynomial_chaos_single(self, nr_collocation_nodes=nr_collocation_nodes, quadrature_order=quadrature_order, nr_pc_mc_samples=nr_pc_mc_samples, - allow_incomplete=allow_incomplete, - ) + save_samples=save_samples, + allow_incomplete=allow_incomplete) data.backend = self.backend data.seed = seed @@ -1046,6 +1056,7 @@ def monte_carlo_single(self, data_folder="data", figure_folder="figures", figureformat=".png", + save_samples=False, filename=None): """ Perform an uncertainty quantification for a single parameter at the time @@ -1152,7 +1163,7 @@ def monte_carlo_single(self, logger.info("Running MC for " + uncertain_parameter) data = self.uncertainty_calculations.monte_carlo(uncertain_parameters=uncertain_parameter, - nr_samples=nr_samples) + nr_samples=nr_samples, save_samples=save_samples) data.backend = self.backend data.seed = seed @@ -1323,13 +1334,13 @@ def plot(type): tmp_folder = os.path.join(folder, uncertain_parameter) self.plotting.folder = tmp_folder - self.plotting.data = self.data[uncertain_parameter] + self.plotting.set_data(self.data[uncertain_parameter]) plot(type) else: self.plotting.folder = folder - self.plotting.data = self.data + self.plotting.set_data(self.data) plot(type) diff --git a/src/uncertainpy/utils/__init__.py b/src/uncertainpy/utils/__init__.py index 18cfbe6d..a5767f3e 100644 --- a/src/uncertainpy/utils/__init__.py +++ b/src/uncertainpy/utils/__init__.py @@ -14,3 +14,4 @@ from .logger import MyFormatter, TqdmLoggingHandler, MultiprocessLoggingHandler from .utility import lengths, none_to_nan, contains_nan from .utility import is_regular, set_nan +from .utility import create_model_parameters diff --git a/src/uncertainpy/utils/utility.py b/src/uncertainpy/utils/utility.py index 01e665ba..22ce3df7 100644 --- a/src/uncertainpy/utils/utility.py +++ b/src/uncertainpy/utils/utility.py @@ -291,3 +291,32 @@ def 
is_regular(values): # return values + +def create_model_parameters(nodes, uncertain_parameters): + """ + Combine nodes (values) with the uncertain parameter names to create a + list of dictionaries corresponding to the model values for each + model evaluation. + Parameters + ---------- + nodes : array + A series of different set of parameters. The model and each feature is + evaluated for each set of parameters in the series. + uncertain_parameters : list + A list of names of the uncertain parameters. + Returns + ------- + model_parameters : list + A list where each element is a dictionary with the model parameters + for a single evaluation. + """ + model_parameters = [] + for node in nodes.T: + if node.ndim == 0: + node = [node] + # New set parameters + parameters = {} + for j, parameter in enumerate(uncertain_parameters): + parameters[parameter] = node[j] + model_parameters.append(parameters) + return model_parameters diff --git a/test.py b/test.py index 01758db1..78610a9e 100644 --- a/test.py +++ b/test.py @@ -1,7 +1,11 @@ import unittest import sys import click -import collections +try: + from collections import Iterable +except ImportError: + from collections.abc import Iterable + import matplotlib matplotlib.use('Agg') @@ -26,7 +30,7 @@ def create_test_suite_parameter(testcase, parameter=False): def to_iterable(iterable): - if not isinstance(iterable, collections.Iterable): + if not isinstance(iterable, Iterable): iterable = [iterable] return iterable diff --git a/tests/testing_classes/testing_uncertainty.py b/tests/testing_classes/testing_uncertainty.py index 589f6b1b..1f738cb2 100644 --- a/tests/testing_classes/testing_uncertainty.py +++ b/tests/testing_classes/testing_uncertainty.py @@ -12,6 +12,7 @@ def polynomial_chaos(self, quadrature_order=4, nr_pc_mc_samples=10**4, allow_incomplete=False, + save_samples=False, seed=None): arguments = {} @@ -25,6 +26,7 @@ def polynomial_chaos(self, arguments["quadrature_order"] = quadrature_order arguments["nr_pc_mc_samples"] = nr_pc_mc_samples arguments["seed"] = seed + arguments["save_samples"] = save_samples arguments["allow_incomplete"] = allow_incomplete @@ -37,6 +39,7 @@ def polynomial_chaos(self, def monte_carlo(self, uncertain_parameters=None, nr_samples=10**3, + save_samples=False, seed=None): arguments = {} @@ -44,6 +47,7 @@ def monte_carlo(self, arguments["uncertain_parameters"] = uncertain_parameters arguments["seed"] = seed arguments["nr_samples"] = nr_samples + arguments["save_samples"] = save_samples data = Data(logger_level=None) data.arguments = arguments @@ -61,4 +65,4 @@ def custom_uncertainty_quantification(self, custom_keyword="custom_value"): data = Data(logger_level=None) data.arguments = arguments - return data \ No newline at end of file + return data
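To make the role of the new create_model_parameters helper in uncertainpy/utils/utility.py concrete, here is a short usage sketch: each column of the nodes array is one set of parameter values, and the helper zips it with the parameter names into one dictionary per model evaluation. The parameter names and values below are made up for illustration, and the import assumes a package built with this patch applied:

import numpy as np
from uncertainpy.utils import create_model_parameters

# Three samples of two uncertain parameters; one column per model evaluation.
nodes = np.array([[1.0, 1.5, 2.0],       # values for "gbar_na"
                  [10.0, 12.0, 14.0]])   # values for "gbar_k"
uncertain_parameters = ["gbar_na", "gbar_k"]

model_parameters = create_model_parameters(nodes, uncertain_parameters)

for parameters in model_parameters:
    print(parameters["gbar_na"], parameters["gbar_k"])
# 1.0 10.0
# 1.5 12.0
# 2.0 14.0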